// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

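/* A sockmap (BPF_MAP_TYPE_SOCKMAP) is backed by struct bpf_stab: a plain
 * array of sock pointers plus the set of psock programs attached to the
 * map. The spinlock serializes slot updates against deletes so the
 * BPF_NOEXIST/BPF_EXIST semantics of map updates stay consistent.
 */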
struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

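/* Map creation: the key is always a 4-byte index. The value is a 32-bit
 * or 64-bit socket file descriptor on update from userspace, while
 * userspace lookups return the 64-bit socket cookie instead (see
 * sock_map_lookup_sys()), which is why both value sizes are accepted.
 */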
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (attr->max_entries == 0 ||
	    attr->key_size != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		bpf_map_area_free(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct sk_psock_progs *progs = sock_map_progs(map);

			if (psock->saved_data_ready && progs->stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && progs->stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && progs->skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
			break;
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

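/* sock_map_link() binds a socket to a map: it takes references on the
 * map's attached programs, creates (or reuses) the socket's sk_psock,
 * copies the program pointers into the psock, swaps in the psock proto
 * ops and, depending on which programs are present, installs either the
 * strparser or the verdict data_ready hook. A socket that already runs a
 * conflicting program from another map is rejected with -EBUSY.
 */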
static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* msg_* and stream_* program references are tracked in psock after
	 * this point. Reference dec and cleanup will occur through the
	 * psock destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		if (sk_is_tcp(sk))
			ret = sk_psock_init_strp(sk, psock);
		else
			ret = -EOPNOTSUPP;
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}

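/* Teardown: sock_map_free() runs once the last map reference is gone. The
 * two synchronize_rcu() calls bracket the walk so that no update/delete is
 * in flight while entries are unlinked, and so that psock readers still
 * traversing their link lists have finished before the backing array is
 * freed.
 */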
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			sock_hold(sk);
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
			sock_put(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	bpf_map_area_free(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk = NULL;
	int err = 0;

	spin_lock_bh(&stab->lock);
	if (!sk_test || sk_test == *psk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static long sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

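/* Common update path shared by the syscall and BPF helper entry points.
 * The caller must hold rcu_read_lock(); the slot swap itself is done
 * under stab->lock so the BPF_NOEXIST/BPF_EXIST checks and the unref of
 * a displaced socket stay atomic with respect to concurrent updates.
 */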
static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	if (sk_is_stream_unix(sk))
		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
	if (sk_is_vsock(sk) &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

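/* Syscall-side update: userspace passes a socket file descriptor as the
 * map value. A minimal sketch of the expected usage via libbpf, where
 * map_fd and sock_fd are placeholders for descriptors the caller already
 * owns and the value width matches the map's value_size:
 *
 *	__u32 key = 0;
 *	__u64 value = sock_fd;
 *	int err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *
 * The fd is resolved to a struct sock below and must belong to a protocol
 * that supports psock (sock_map_sk_is_suitable()) in an allowed state
 * (sock_map_sk_state_allowed()).
 */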
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}

static long sock_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

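/* BPF-side update from a BPF_PROG_TYPE_SOCK_OPS program. A hedged sketch
 * of how such a program might add an established connection to the map
 * (names like sock_map are illustrative, not part of this file):
 *
 *	SEC("sockops")
 *	int add_to_map(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
 *		return 0;
 *	}
 *
 * Only sockets in the ops states accepted by sock_map_op_okay() can be
 * inserted this way.
 */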
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

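/* Redirect helper used by sk_skb (verdict) programs. A hedged sketch of
 * the typical caller, with sock_map again an illustrative map name:
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		__u32 key = 0;
 *
 *		return bpf_sk_redirect_map(skb, &sock_map, key, 0);
 *	}
 *
 * The only supported flag is BPF_F_INGRESS, which requeues the skb on the
 * target socket's ingress path instead of transmitting it.
 */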
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

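/* Same idea for sk_msg programs on the sendmsg path: the verdict program
 * picks a destination socket out of the map and the msg is redirected to
 * it. Unlike the skb variant, the redirect target is recorded in the
 * sk_msg itself (msg->sk_redir), and egress redirection requires a TCP
 * socket, as checked below.
 */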
BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;
	if (sk_is_vsock(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

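/* bpf_iter support: these structs back the "sockmap" iterator target so
 * that a BPF iterator program can walk every (key, sk) pair in the map
 * under RCU. The seq_file callbacks below hold rcu_read_lock() from
 * start to stop.
 */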
struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	return 0;
}

static void sock_map_fini_seq_private(void *priv_data)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_stab);

	usage += (u64)map->max_entries * sizeof(struct sock *);
	return usage;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.fini_seq_private	= sock_map_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_map_mem_usage,
	.map_btf_id		= &sock_map_btf_ids[0],
	.iter_seq_info		= &sock_map_iter_seq_info,
};

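/* The second map flavour, BPF_MAP_TYPE_SOCKHASH, keys sockets by an
 * arbitrary caller-chosen key instead of an array index. It reuses the
 * same psock linking logic above and only differs in how elements are
 * stored: a power-of-two array of hash buckets, each a spinlock-protected
 * RCU hlist of bpf_shtab_elem.
 */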
struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							 u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
}

static long sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

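/* Hash-table counterpart of sock_map_update_common(): link the socket's
 * psock to the map, then insert a freshly allocated element at the head
 * of its bucket so concurrent RCU lookups see the new entry before any
 * displaced one, which is then unlinked and freed after a grace period.
 */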
sock_hash_update_common(struct bpf_map * map,void * key,struct sock * sk,u64 flags)988604326b4SDaniel Borkmann static int sock_hash_update_common(struct bpf_map *map, void *key,
989604326b4SDaniel Borkmann struct sock *sk, u64 flags)
990604326b4SDaniel Borkmann {
991032a6b35SAndrey Ignatov struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
992604326b4SDaniel Borkmann u32 key_size = map->key_size, hash;
993032a6b35SAndrey Ignatov struct bpf_shtab_elem *elem, *elem_new;
994032a6b35SAndrey Ignatov struct bpf_shtab_bucket *bucket;
995604326b4SDaniel Borkmann struct sk_psock_link *link;
996604326b4SDaniel Borkmann struct sk_psock *psock;
997604326b4SDaniel Borkmann int ret;
998604326b4SDaniel Borkmann
999604326b4SDaniel Borkmann WARN_ON_ONCE(!rcu_read_lock_held());
1000604326b4SDaniel Borkmann if (unlikely(flags > BPF_EXIST))
1001604326b4SDaniel Borkmann return -EINVAL;
1002604326b4SDaniel Borkmann
1003604326b4SDaniel Borkmann link = sk_psock_init_link();
1004604326b4SDaniel Borkmann if (!link)
1005604326b4SDaniel Borkmann return -ENOMEM;
1006604326b4SDaniel Borkmann
10072004fdbdSCong Wang ret = sock_map_link(map, sk);
1008604326b4SDaniel Borkmann if (ret < 0)
1009604326b4SDaniel Borkmann goto out_free;
1010604326b4SDaniel Borkmann
1011604326b4SDaniel Borkmann psock = sk_psock(sk);
1012604326b4SDaniel Borkmann WARN_ON_ONCE(!psock);
1013604326b4SDaniel Borkmann
1014604326b4SDaniel Borkmann hash = sock_hash_bucket_hash(key, key_size);
1015604326b4SDaniel Borkmann bucket = sock_hash_select_bucket(htab, hash);
1016604326b4SDaniel Borkmann
101735d2b7ffSJohn Fastabend spin_lock_bh(&bucket->lock);
1018604326b4SDaniel Borkmann elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1019604326b4SDaniel Borkmann if (elem && flags == BPF_NOEXIST) {
1020604326b4SDaniel Borkmann ret = -EEXIST;
1021604326b4SDaniel Borkmann goto out_unlock;
1022604326b4SDaniel Borkmann } else if (!elem && flags == BPF_EXIST) {
1023604326b4SDaniel Borkmann ret = -ENOENT;
1024604326b4SDaniel Borkmann goto out_unlock;
1025604326b4SDaniel Borkmann }
1026604326b4SDaniel Borkmann
1027604326b4SDaniel Borkmann elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1028604326b4SDaniel Borkmann if (IS_ERR(elem_new)) {
1029604326b4SDaniel Borkmann ret = PTR_ERR(elem_new);
1030604326b4SDaniel Borkmann goto out_unlock;
1031604326b4SDaniel Borkmann }
1032604326b4SDaniel Borkmann
1033604326b4SDaniel Borkmann sock_map_add_link(psock, link, map, elem_new);
1034604326b4SDaniel Borkmann /* Add new element to the head of the list, so that
1035604326b4SDaniel Borkmann * concurrent search will find it before old elem.
1036604326b4SDaniel Borkmann */
1037604326b4SDaniel Borkmann hlist_add_head_rcu(&elem_new->node, &bucket->head);
1038604326b4SDaniel Borkmann if (elem) {
1039604326b4SDaniel Borkmann hlist_del_rcu(&elem->node);
1040604326b4SDaniel Borkmann sock_map_unref(elem->sk, elem);
1041604326b4SDaniel Borkmann sock_hash_free_elem(htab, elem);
1042604326b4SDaniel Borkmann }
104335d2b7ffSJohn Fastabend spin_unlock_bh(&bucket->lock);
1044604326b4SDaniel Borkmann return 0;
1045604326b4SDaniel Borkmann out_unlock:
104635d2b7ffSJohn Fastabend spin_unlock_bh(&bucket->lock);
1047604326b4SDaniel Borkmann sk_psock_put(sk, psock);
1048604326b4SDaniel Borkmann out_free:
1049604326b4SDaniel Borkmann sk_psock_free_link(link);
1050604326b4SDaniel Borkmann return ret;
1051604326b4SDaniel Borkmann }
1052604326b4SDaniel Borkmann
sock_hash_get_next_key(struct bpf_map * map,void * key,void * key_next)1053604326b4SDaniel Borkmann static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1054604326b4SDaniel Borkmann void *key_next)
1055604326b4SDaniel Borkmann {
1056032a6b35SAndrey Ignatov struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1057032a6b35SAndrey Ignatov struct bpf_shtab_elem *elem, *elem_next;
1058604326b4SDaniel Borkmann u32 hash, key_size = map->key_size;
1059604326b4SDaniel Borkmann struct hlist_head *head;
1060604326b4SDaniel Borkmann int i = 0;
1061604326b4SDaniel Borkmann
1062604326b4SDaniel Borkmann if (!key)
1063604326b4SDaniel Borkmann goto find_first_elem;
1064604326b4SDaniel Borkmann hash = sock_hash_bucket_hash(key, key_size);
1065604326b4SDaniel Borkmann head = &sock_hash_select_bucket(htab, hash)->head;
1066604326b4SDaniel Borkmann elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1067604326b4SDaniel Borkmann if (!elem)
1068604326b4SDaniel Borkmann goto find_first_elem;
1069604326b4SDaniel Borkmann
107003653515SLorenz Bauer elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1071032a6b35SAndrey Ignatov struct bpf_shtab_elem, node);
1072604326b4SDaniel Borkmann if (elem_next) {
1073604326b4SDaniel Borkmann memcpy(key_next, elem_next->key, key_size);
1074604326b4SDaniel Borkmann return 0;
1075604326b4SDaniel Borkmann }
1076604326b4SDaniel Borkmann
1077604326b4SDaniel Borkmann i = hash & (htab->buckets_num - 1);
1078604326b4SDaniel Borkmann i++;
1079604326b4SDaniel Borkmann find_first_elem:
1080604326b4SDaniel Borkmann for (; i < htab->buckets_num; i++) {
1081604326b4SDaniel Borkmann head = &sock_hash_select_bucket(htab, i)->head;
108203653515SLorenz Bauer elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1083032a6b35SAndrey Ignatov struct bpf_shtab_elem, node);
1084604326b4SDaniel Borkmann if (elem_next) {
1085604326b4SDaniel Borkmann memcpy(key_next, elem_next->key, key_size);
1086604326b4SDaniel Borkmann return 0;
1087604326b4SDaniel Borkmann }
1088604326b4SDaniel Borkmann }
1089604326b4SDaniel Borkmann
1090604326b4SDaniel Borkmann return -ENOENT;
1091604326b4SDaniel Borkmann }
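
/* Illustrative userspace sketch (not part of this file): what
 * sock_hash_get_next_key() backs on the syscall side, namely walking every
 * key of a sockhash with bpf_map_get_next_key() from libbpf. It assumes a
 * map_fd for a sockhash created with a 4-byte key; all names are illustrative.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <bpf/bpf.h>

static void dump_sockhash_keys(int map_fd)
{
	uint32_t key, next_key;
	int err;

	/* A NULL current key asks for the first key in the map. */
	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
	while (!err) {
		printf("key %u\n", next_key);
		key = next_key;
		err = bpf_map_get_next_key(map_fd, &key, &next_key);
	}
	if (errno != ENOENT)	/* ENOENT simply means "no more keys" */
		perror("bpf_map_get_next_key");
}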
1092604326b4SDaniel Borkmann
sock_hash_alloc(union bpf_attr * attr)1093604326b4SDaniel Borkmann static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1094604326b4SDaniel Borkmann {
1095032a6b35SAndrey Ignatov struct bpf_shtab *htab;
1096604326b4SDaniel Borkmann int i, err;
1097604326b4SDaniel Borkmann
1098604326b4SDaniel Borkmann if (attr->max_entries == 0 ||
1099604326b4SDaniel Borkmann attr->key_size == 0 ||
1100c1cdf65dSJakub Sitnicki (attr->value_size != sizeof(u32) &&
1101c1cdf65dSJakub Sitnicki attr->value_size != sizeof(u64)) ||
1102604326b4SDaniel Borkmann attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1103604326b4SDaniel Borkmann return ERR_PTR(-EINVAL);
1104604326b4SDaniel Borkmann if (attr->key_size > MAX_BPF_STACK)
1105604326b4SDaniel Borkmann return ERR_PTR(-E2BIG);
1106604326b4SDaniel Borkmann
110773cf09a3SYafang Shao htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1108604326b4SDaniel Borkmann if (!htab)
1109604326b4SDaniel Borkmann return ERR_PTR(-ENOMEM);
1110604326b4SDaniel Borkmann
1111604326b4SDaniel Borkmann bpf_map_init_from_attr(&htab->map, attr);
1112604326b4SDaniel Borkmann
1113604326b4SDaniel Borkmann htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1114032a6b35SAndrey Ignatov htab->elem_size = sizeof(struct bpf_shtab_elem) +
1115604326b4SDaniel Borkmann round_up(htab->map.key_size, 8);
1116604326b4SDaniel Borkmann if (htab->buckets_num == 0 ||
1117032a6b35SAndrey Ignatov htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1118604326b4SDaniel Borkmann err = -EINVAL;
1119604326b4SDaniel Borkmann goto free_htab;
1120604326b4SDaniel Borkmann }
1121604326b4SDaniel Borkmann
1122604326b4SDaniel Borkmann htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1123032a6b35SAndrey Ignatov sizeof(struct bpf_shtab_bucket),
1124604326b4SDaniel Borkmann htab->map.numa_node);
1125604326b4SDaniel Borkmann if (!htab->buckets) {
1126604326b4SDaniel Borkmann err = -ENOMEM;
1127604326b4SDaniel Borkmann goto free_htab;
1128604326b4SDaniel Borkmann }
1129604326b4SDaniel Borkmann
1130604326b4SDaniel Borkmann for (i = 0; i < htab->buckets_num; i++) {
1131604326b4SDaniel Borkmann INIT_HLIST_HEAD(&htab->buckets[i].head);
113235d2b7ffSJohn Fastabend spin_lock_init(&htab->buckets[i].lock);
1133604326b4SDaniel Borkmann }
1134604326b4SDaniel Borkmann
1135604326b4SDaniel Borkmann return &htab->map;
1136604326b4SDaniel Borkmann free_htab:
113773cf09a3SYafang Shao bpf_map_area_free(htab);
1138604326b4SDaniel Borkmann return ERR_PTR(err);
1139604326b4SDaniel Borkmann }
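
/* Illustrative userspace sketch (not part of this file): creating a sockhash
 * with libbpf's bpf_map_create(). Per sock_hash_alloc() above, any non-zero
 * key size up to MAX_BPF_STACK is accepted, but the value size must be 4 or
 * 8 bytes; the sizes, name, and entry count below are arbitrary choices.
 */
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int create_sockhash(void)
{
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "example_hash",
				sizeof(__u32),	/* key: arbitrary, here 4 bytes */
				sizeof(__u64),	/* value: must be 4 or 8 bytes */
				1024,		/* max_entries */
				NULL);
	if (map_fd < 0)
		perror("bpf_map_create");
	return map_fd;
}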
1140604326b4SDaniel Borkmann
sock_hash_free(struct bpf_map * map)1141604326b4SDaniel Borkmann static void sock_hash_free(struct bpf_map *map)
1142604326b4SDaniel Borkmann {
1143032a6b35SAndrey Ignatov struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1144032a6b35SAndrey Ignatov struct bpf_shtab_bucket *bucket;
114575e68e5bSJakub Sitnicki struct hlist_head unlink_list;
1146032a6b35SAndrey Ignatov struct bpf_shtab_elem *elem;
1147604326b4SDaniel Borkmann struct hlist_node *node;
1148604326b4SDaniel Borkmann int i;
1149604326b4SDaniel Borkmann
115090db6d77SJohn Fastabend	/* After the sync no updates or deletes will be in-flight, so it
115190db6d77SJohn Fastabend	 * is safe to walk the map and remove entries without risking a
115290db6d77SJohn Fastabend	 * race in the EEXIST update case.
115390db6d77SJohn Fastabend	 */
1154604326b4SDaniel Borkmann synchronize_rcu();
1155604326b4SDaniel Borkmann for (i = 0; i < htab->buckets_num; i++) {
1156604326b4SDaniel Borkmann bucket = sock_hash_select_bucket(htab, i);
115775e68e5bSJakub Sitnicki
115875e68e5bSJakub Sitnicki /* We are racing with sock_hash_delete_from_link to
115975e68e5bSJakub Sitnicki * enter the spin-lock critical section. Every socket on
116075e68e5bSJakub Sitnicki		 * the list is still linked to the sockhash. Since the link
116175e68e5bSJakub Sitnicki		 * exists, the psock exists and holds a ref to the socket. That
116275e68e5bSJakub Sitnicki		 * lets us grab a socket ref too.
116375e68e5bSJakub Sitnicki */
116435d2b7ffSJohn Fastabend spin_lock_bh(&bucket->lock);
116575e68e5bSJakub Sitnicki hlist_for_each_entry(elem, &bucket->head, node)
116675e68e5bSJakub Sitnicki sock_hold(elem->sk);
116775e68e5bSJakub Sitnicki hlist_move_list(&bucket->head, &unlink_list);
116835d2b7ffSJohn Fastabend spin_unlock_bh(&bucket->lock);
116975e68e5bSJakub Sitnicki
117075e68e5bSJakub Sitnicki		/* Process removed entries outside of atomic context so
117175e68e5bSJakub Sitnicki		 * that we can block on the socket lock before deleting
117275e68e5bSJakub Sitnicki		 * the psock's link to the sockhash.
117375e68e5bSJakub Sitnicki		 */
117475e68e5bSJakub Sitnicki hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
117575e68e5bSJakub Sitnicki hlist_del(&elem->node);
11767e81a353SJohn Fastabend lock_sock(elem->sk);
1177db6a5018SJakub Sitnicki rcu_read_lock();
1178604326b4SDaniel Borkmann sock_map_unref(elem->sk, elem);
1179db6a5018SJakub Sitnicki rcu_read_unlock();
11807e81a353SJohn Fastabend release_sock(elem->sk);
118175e68e5bSJakub Sitnicki sock_put(elem->sk);
118233a7c831SJakub Sitnicki sock_hash_free_elem(htab, elem);
1183604326b4SDaniel Borkmann }
118480bd490aSEric Dumazet cond_resched();
1185604326b4SDaniel Borkmann }
1186604326b4SDaniel Borkmann
11870b2dc839SJakub Sitnicki /* wait for psock readers accessing its map link */
11880b2dc839SJakub Sitnicki synchronize_rcu();
11890b2dc839SJakub Sitnicki
1190604326b4SDaniel Borkmann bpf_map_area_free(htab->buckets);
119173cf09a3SYafang Shao bpf_map_area_free(htab);
1192604326b4SDaniel Borkmann }
1193604326b4SDaniel Borkmann
sock_hash_lookup_sys(struct bpf_map * map,void * key)1194c1cdf65dSJakub Sitnicki static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1195c1cdf65dSJakub Sitnicki {
1196c1cdf65dSJakub Sitnicki struct sock *sk;
1197c1cdf65dSJakub Sitnicki
1198c1cdf65dSJakub Sitnicki if (map->value_size != sizeof(u64))
1199c1cdf65dSJakub Sitnicki return ERR_PTR(-ENOSPC);
1200c1cdf65dSJakub Sitnicki
1201c1cdf65dSJakub Sitnicki sk = __sock_hash_lookup_elem(map, key);
1202c1cdf65dSJakub Sitnicki if (!sk)
1203c1cdf65dSJakub Sitnicki return ERR_PTR(-ENOENT);
1204c1cdf65dSJakub Sitnicki
120592acdc58SDaniel Borkmann __sock_gen_cookie(sk);
1206c1cdf65dSJakub Sitnicki return &sk->sk_cookie;
1207c1cdf65dSJakub Sitnicki }
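
/* Illustrative userspace sketch (not part of this file): because
 * sock_hash_lookup_sys() exposes only the socket cookie, a syscall-side
 * lookup on a sockhash created with an 8-byte value returns that cookie
 * rather than a socket. map_fd and the key layout are assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <bpf/bpf.h>

static void print_cookie(int map_fd, uint32_t key)
{
	uint64_t cookie;

	if (bpf_map_lookup_elem(map_fd, &key, &cookie))
		perror("bpf_map_lookup_elem");
	else
		printf("key %u -> socket cookie %llu\n", key,
		       (unsigned long long)cookie);
}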
1208c1cdf65dSJakub Sitnicki
sock_hash_lookup(struct bpf_map * map,void * key)12091d59f3bcSJakub Sitnicki static void *sock_hash_lookup(struct bpf_map *map, void *key)
12101d59f3bcSJakub Sitnicki {
121164d85290SJakub Sitnicki struct sock *sk;
121264d85290SJakub Sitnicki
121364d85290SJakub Sitnicki sk = __sock_hash_lookup_elem(map, key);
1214654785a1SLorenz Bauer if (!sk)
121564d85290SJakub Sitnicki return NULL;
121664d85290SJakub Sitnicki if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
121764d85290SJakub Sitnicki return NULL;
121864d85290SJakub Sitnicki return sk;
12191d59f3bcSJakub Sitnicki }
12201d59f3bcSJakub Sitnicki
sock_hash_release_progs(struct bpf_map * map)1221604326b4SDaniel Borkmann static void sock_hash_release_progs(struct bpf_map *map)
1222604326b4SDaniel Borkmann {
1223032a6b35SAndrey Ignatov psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1224604326b4SDaniel Borkmann }
1225604326b4SDaniel Borkmann
BPF_CALL_4(bpf_sock_hash_update,struct bpf_sock_ops_kern *,sops,struct bpf_map *,map,void *,key,u64,flags)1226604326b4SDaniel Borkmann BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1227604326b4SDaniel Borkmann struct bpf_map *, map, void *, key, u64, flags)
1228604326b4SDaniel Borkmann {
1229604326b4SDaniel Borkmann WARN_ON_ONCE(!rcu_read_lock_held());
1230604326b4SDaniel Borkmann
1231604326b4SDaniel Borkmann if (likely(sock_map_sk_is_suitable(sops->sk) &&
1232604326b4SDaniel Borkmann sock_map_op_okay(sops)))
1233604326b4SDaniel Borkmann return sock_hash_update_common(map, key, sops->sk, flags);
1234604326b4SDaniel Borkmann return -EOPNOTSUPP;
1235604326b4SDaniel Borkmann }
1236604326b4SDaniel Borkmann
1237604326b4SDaniel Borkmann const struct bpf_func_proto bpf_sock_hash_update_proto = {
1238604326b4SDaniel Borkmann .func = bpf_sock_hash_update,
1239604326b4SDaniel Borkmann .gpl_only = false,
1240604326b4SDaniel Borkmann .pkt_access = true,
1241604326b4SDaniel Borkmann .ret_type = RET_INTEGER,
1242604326b4SDaniel Borkmann .arg1_type = ARG_PTR_TO_CTX,
1243604326b4SDaniel Borkmann .arg2_type = ARG_CONST_MAP_PTR,
1244604326b4SDaniel Borkmann .arg3_type = ARG_PTR_TO_MAP_KEY,
1245604326b4SDaniel Borkmann .arg4_type = ARG_ANYTHING,
1246604326b4SDaniel Borkmann };
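
/* Illustrative BPF-side sketch (not part of this file): a sockops program
 * inserting established TCP sockets into a sockhash through the
 * bpf_sock_hash_update() helper backed by the proto above. The map layout
 * and the local_port key are assumptions; build with clang -target bpf.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
} sock_hash SEC(".maps");

SEC("sockops")
int add_established(struct bpf_sock_ops *skops)
{
	__u32 key = skops->local_port;	/* illustrative key choice */

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		bpf_sock_hash_update(skops, &sock_hash, &key, BPF_NOEXIST);
	return 0;
}

char _license[] SEC("license") = "GPL";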
1247604326b4SDaniel Borkmann
BPF_CALL_4(bpf_sk_redirect_hash,struct sk_buff *,skb,struct bpf_map *,map,void *,key,u64,flags)1248604326b4SDaniel Borkmann BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1249604326b4SDaniel Borkmann struct bpf_map *, map, void *, key, u64, flags)
1250604326b4SDaniel Borkmann {
12518ca30379SJakub Sitnicki struct sock *sk;
1252604326b4SDaniel Borkmann
1253604326b4SDaniel Borkmann if (unlikely(flags & ~(BPF_F_INGRESS)))
1254604326b4SDaniel Borkmann return SK_DROP;
12558ca30379SJakub Sitnicki
12568ca30379SJakub Sitnicki sk = __sock_hash_lookup_elem(map, key);
12578ca30379SJakub Sitnicki if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1258604326b4SDaniel Borkmann return SK_DROP;
125912c3e619SMichal Luczaj if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
126012c3e619SMichal Luczaj return SK_DROP;
12618ca30379SJakub Sitnicki
1262e3526bb9SCong Wang skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1263604326b4SDaniel Borkmann return SK_PASS;
1264604326b4SDaniel Borkmann }
1265604326b4SDaniel Borkmann
1266604326b4SDaniel Borkmann const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1267604326b4SDaniel Borkmann .func = bpf_sk_redirect_hash,
1268604326b4SDaniel Borkmann .gpl_only = false,
1269604326b4SDaniel Borkmann .ret_type = RET_INTEGER,
1270604326b4SDaniel Borkmann .arg1_type = ARG_PTR_TO_CTX,
1271604326b4SDaniel Borkmann .arg2_type = ARG_CONST_MAP_PTR,
1272604326b4SDaniel Borkmann .arg3_type = ARG_PTR_TO_MAP_KEY,
1273604326b4SDaniel Borkmann .arg4_type = ARG_ANYTHING,
1274604326b4SDaniel Borkmann };
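
/* Illustrative BPF-side sketch (not part of this file), continuing the BPF
 * object sketched after bpf_sock_hash_update_proto above: an sk_skb verdict
 * program using bpf_sk_redirect_hash() to steer received data to the socket
 * stored under a key in that sock_hash map. The key derivation is assumed.
 */
SEC("sk_skb/stream_verdict")
int redirect_ingress(struct __sk_buff *skb)
{
	__u32 key = skb->local_port;	/* illustrative key choice */

	/* SK_PASS on success, SK_DROP if no suitable socket is found */
	return bpf_sk_redirect_hash(skb, &sock_hash, &key, BPF_F_INGRESS);
}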
1275604326b4SDaniel Borkmann
BPF_CALL_4(bpf_msg_redirect_hash,struct sk_msg *,msg,struct bpf_map *,map,void *,key,u64,flags)1276604326b4SDaniel Borkmann BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1277604326b4SDaniel Borkmann struct bpf_map *, map, void *, key, u64, flags)
1278604326b4SDaniel Borkmann {
12798ca30379SJakub Sitnicki struct sock *sk;
12808ca30379SJakub Sitnicki
1281604326b4SDaniel Borkmann if (unlikely(flags & ~(BPF_F_INGRESS)))
1282604326b4SDaniel Borkmann return SK_DROP;
12838ca30379SJakub Sitnicki
12848ca30379SJakub Sitnicki sk = __sock_hash_lookup_elem(map, key);
12858ca30379SJakub Sitnicki if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1286604326b4SDaniel Borkmann return SK_DROP;
1287b80e31baSJakub Sitnicki if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1288b80e31baSJakub Sitnicki return SK_DROP;
128912c3e619SMichal Luczaj if (sk_is_vsock(sk))
129012c3e619SMichal Luczaj return SK_DROP;
12918ca30379SJakub Sitnicki
12928ca30379SJakub Sitnicki msg->flags = flags;
12938ca30379SJakub Sitnicki msg->sk_redir = sk;
1294604326b4SDaniel Borkmann return SK_PASS;
1295604326b4SDaniel Borkmann }
1296604326b4SDaniel Borkmann
1297604326b4SDaniel Borkmann const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1298604326b4SDaniel Borkmann .func = bpf_msg_redirect_hash,
1299604326b4SDaniel Borkmann .gpl_only = false,
1300604326b4SDaniel Borkmann .ret_type = RET_INTEGER,
1301604326b4SDaniel Borkmann .arg1_type = ARG_PTR_TO_CTX,
1302604326b4SDaniel Borkmann .arg2_type = ARG_CONST_MAP_PTR,
1303604326b4SDaniel Borkmann .arg3_type = ARG_PTR_TO_MAP_KEY,
1304604326b4SDaniel Borkmann .arg4_type = ARG_ANYTHING,
1305604326b4SDaniel Borkmann };
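
/* Illustrative BPF-side sketch (not part of this file), again reusing the
 * sock_hash map from the sockops sketch: an sk_msg program redirecting sent
 * data with bpf_msg_redirect_hash(). Per the checks above, an egress
 * redirect (no BPF_F_INGRESS) requires a TCP target. The key is assumed.
 */
SEC("sk_msg")
int redirect_msg(struct sk_msg_md *msg)
{
	__u32 key = msg->local_port;	/* illustrative key choice */

	return bpf_msg_redirect_hash(msg, &sock_hash, &key, BPF_F_INGRESS);
}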
1306604326b4SDaniel Borkmann
130703653515SLorenz Bauer struct sock_hash_seq_info {
130803653515SLorenz Bauer struct bpf_map *map;
130903653515SLorenz Bauer struct bpf_shtab *htab;
131003653515SLorenz Bauer u32 bucket_id;
131103653515SLorenz Bauer };
131203653515SLorenz Bauer
sock_hash_seq_find_next(struct sock_hash_seq_info * info,struct bpf_shtab_elem * prev_elem)131303653515SLorenz Bauer static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
131403653515SLorenz Bauer struct bpf_shtab_elem *prev_elem)
131503653515SLorenz Bauer {
131603653515SLorenz Bauer const struct bpf_shtab *htab = info->htab;
131703653515SLorenz Bauer struct bpf_shtab_bucket *bucket;
131803653515SLorenz Bauer struct bpf_shtab_elem *elem;
131903653515SLorenz Bauer struct hlist_node *node;
132003653515SLorenz Bauer
132103653515SLorenz Bauer /* try to find next elem in the same bucket */
132203653515SLorenz Bauer if (prev_elem) {
132303653515SLorenz Bauer node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
132403653515SLorenz Bauer elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
132503653515SLorenz Bauer if (elem)
132603653515SLorenz Bauer return elem;
132703653515SLorenz Bauer
132803653515SLorenz Bauer /* no more elements, continue in the next bucket */
132903653515SLorenz Bauer info->bucket_id++;
133003653515SLorenz Bauer }
133103653515SLorenz Bauer
133203653515SLorenz Bauer for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
133303653515SLorenz Bauer bucket = &htab->buckets[info->bucket_id];
133403653515SLorenz Bauer node = rcu_dereference(hlist_first_rcu(&bucket->head));
133503653515SLorenz Bauer elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
133603653515SLorenz Bauer if (elem)
133703653515SLorenz Bauer return elem;
133803653515SLorenz Bauer }
133903653515SLorenz Bauer
134003653515SLorenz Bauer return NULL;
134103653515SLorenz Bauer }
134203653515SLorenz Bauer
sock_hash_seq_start(struct seq_file * seq,loff_t * pos)134303653515SLorenz Bauer static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1344f58423aeSLorenz Bauer __acquires(rcu)
134503653515SLorenz Bauer {
134603653515SLorenz Bauer struct sock_hash_seq_info *info = seq->private;
134703653515SLorenz Bauer
134803653515SLorenz Bauer if (*pos == 0)
134903653515SLorenz Bauer ++*pos;
135003653515SLorenz Bauer
135103653515SLorenz Bauer /* pairs with sock_hash_seq_stop */
135203653515SLorenz Bauer rcu_read_lock();
135303653515SLorenz Bauer return sock_hash_seq_find_next(info, NULL);
135403653515SLorenz Bauer }
135503653515SLorenz Bauer
sock_hash_seq_next(struct seq_file * seq,void * v,loff_t * pos)135603653515SLorenz Bauer static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1357f58423aeSLorenz Bauer __must_hold(rcu)
135803653515SLorenz Bauer {
135903653515SLorenz Bauer struct sock_hash_seq_info *info = seq->private;
136003653515SLorenz Bauer
136103653515SLorenz Bauer ++*pos;
136203653515SLorenz Bauer return sock_hash_seq_find_next(info, v);
136303653515SLorenz Bauer }
136403653515SLorenz Bauer
sock_hash_seq_show(struct seq_file * seq,void * v)136503653515SLorenz Bauer static int sock_hash_seq_show(struct seq_file *seq, void *v)
1366f58423aeSLorenz Bauer __must_hold(rcu)
136703653515SLorenz Bauer {
136803653515SLorenz Bauer struct sock_hash_seq_info *info = seq->private;
136903653515SLorenz Bauer struct bpf_iter__sockmap ctx = {};
137003653515SLorenz Bauer struct bpf_shtab_elem *elem = v;
137103653515SLorenz Bauer struct bpf_iter_meta meta;
137203653515SLorenz Bauer struct bpf_prog *prog;
137303653515SLorenz Bauer
137403653515SLorenz Bauer meta.seq = seq;
137503653515SLorenz Bauer prog = bpf_iter_get_info(&meta, !elem);
137603653515SLorenz Bauer if (!prog)
137703653515SLorenz Bauer return 0;
137803653515SLorenz Bauer
137903653515SLorenz Bauer ctx.meta = &meta;
138003653515SLorenz Bauer ctx.map = info->map;
138103653515SLorenz Bauer if (elem) {
138203653515SLorenz Bauer ctx.key = elem->key;
138303653515SLorenz Bauer ctx.sk = elem->sk;
138403653515SLorenz Bauer }
138503653515SLorenz Bauer
138603653515SLorenz Bauer return bpf_iter_run_prog(prog, &ctx);
138703653515SLorenz Bauer }
138803653515SLorenz Bauer
sock_hash_seq_stop(struct seq_file * seq,void * v)138903653515SLorenz Bauer static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1390f58423aeSLorenz Bauer __releases(rcu)
139103653515SLorenz Bauer {
139203653515SLorenz Bauer if (!v)
139303653515SLorenz Bauer (void)sock_hash_seq_show(seq, NULL);
139403653515SLorenz Bauer
139503653515SLorenz Bauer /* pairs with sock_hash_seq_start */
139603653515SLorenz Bauer rcu_read_unlock();
139703653515SLorenz Bauer }
139803653515SLorenz Bauer
139903653515SLorenz Bauer static const struct seq_operations sock_hash_seq_ops = {
140003653515SLorenz Bauer .start = sock_hash_seq_start,
140103653515SLorenz Bauer .next = sock_hash_seq_next,
140203653515SLorenz Bauer .stop = sock_hash_seq_stop,
140303653515SLorenz Bauer .show = sock_hash_seq_show,
140403653515SLorenz Bauer };
140503653515SLorenz Bauer
sock_hash_init_seq_private(void * priv_data,struct bpf_iter_aux_info * aux)140603653515SLorenz Bauer static int sock_hash_init_seq_private(void *priv_data,
140703653515SLorenz Bauer struct bpf_iter_aux_info *aux)
140803653515SLorenz Bauer {
140903653515SLorenz Bauer struct sock_hash_seq_info *info = priv_data;
141003653515SLorenz Bauer
1411f0d2b271SHou Tao bpf_map_inc_with_uref(aux->map);
141203653515SLorenz Bauer info->map = aux->map;
141303653515SLorenz Bauer info->htab = container_of(aux->map, struct bpf_shtab, map);
141403653515SLorenz Bauer return 0;
141503653515SLorenz Bauer }
141603653515SLorenz Bauer
sock_hash_fini_seq_private(void * priv_data)1417f0d2b271SHou Tao static void sock_hash_fini_seq_private(void *priv_data)
1418f0d2b271SHou Tao {
1419f0d2b271SHou Tao struct sock_hash_seq_info *info = priv_data;
1420f0d2b271SHou Tao
1421f0d2b271SHou Tao bpf_map_put_with_uref(info->map);
1422f0d2b271SHou Tao }
1423f0d2b271SHou Tao
sock_hash_mem_usage(const struct bpf_map * map)142473d2c619SYafang Shao static u64 sock_hash_mem_usage(const struct bpf_map *map)
142573d2c619SYafang Shao {
142673d2c619SYafang Shao struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
142773d2c619SYafang Shao u64 usage = sizeof(*htab);
142873d2c619SYafang Shao
142973d2c619SYafang Shao usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
143073d2c619SYafang Shao usage += atomic_read(&htab->count) * (u64)htab->elem_size;
143173d2c619SYafang Shao return usage;
143273d2c619SYafang Shao }
143373d2c619SYafang Shao
143403653515SLorenz Bauer static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
143503653515SLorenz Bauer .seq_ops = &sock_hash_seq_ops,
143603653515SLorenz Bauer .init_seq_private = sock_hash_init_seq_private,
1437f0d2b271SHou Tao .fini_seq_private = sock_hash_fini_seq_private,
143803653515SLorenz Bauer .seq_priv_size = sizeof(struct sock_hash_seq_info),
143903653515SLorenz Bauer };
144003653515SLorenz Bauer
1441c317ab71SMenglong Dong BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1442604326b4SDaniel Borkmann const struct bpf_map_ops sock_hash_ops = {
1443f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal,
1444604326b4SDaniel Borkmann .map_alloc = sock_hash_alloc,
1445604326b4SDaniel Borkmann .map_free = sock_hash_free,
1446604326b4SDaniel Borkmann .map_get_next_key = sock_hash_get_next_key,
14470126240fSLorenz Bauer .map_update_elem = sock_map_update_elem,
1448604326b4SDaniel Borkmann .map_delete_elem = sock_hash_delete_elem,
14491d59f3bcSJakub Sitnicki .map_lookup_elem = sock_hash_lookup,
1450c1cdf65dSJakub Sitnicki .map_lookup_elem_sys_only = sock_hash_lookup_sys,
1451604326b4SDaniel Borkmann .map_release_uref = sock_hash_release_progs,
1452604326b4SDaniel Borkmann .map_check_btf = map_check_no_btf,
145373d2c619SYafang Shao .map_mem_usage = sock_hash_mem_usage,
1454c317ab71SMenglong Dong .map_btf_id = &sock_hash_map_btf_ids[0],
145503653515SLorenz Bauer .iter_seq_info = &sock_hash_iter_seq_info,
1456604326b4SDaniel Borkmann };
1457604326b4SDaniel Borkmann
sock_map_progs(struct bpf_map * map)1458604326b4SDaniel Borkmann static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1459604326b4SDaniel Borkmann {
1460604326b4SDaniel Borkmann switch (map->map_type) {
1461604326b4SDaniel Borkmann case BPF_MAP_TYPE_SOCKMAP:
1462604326b4SDaniel Borkmann return &container_of(map, struct bpf_stab, map)->progs;
1463604326b4SDaniel Borkmann case BPF_MAP_TYPE_SOCKHASH:
1464032a6b35SAndrey Ignatov return &container_of(map, struct bpf_shtab, map)->progs;
1465604326b4SDaniel Borkmann default:
1466604326b4SDaniel Borkmann break;
1467604326b4SDaniel Borkmann }
1468604326b4SDaniel Borkmann
1469604326b4SDaniel Borkmann return NULL;
1470604326b4SDaniel Borkmann }
1471604326b4SDaniel Borkmann
sock_map_prog_lookup(struct bpf_map * map,struct bpf_prog *** pprog,u32 which)1472748cd572SDi Zhu static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1473748cd572SDi Zhu u32 which)
1474604326b4SDaniel Borkmann {
1475604326b4SDaniel Borkmann struct sk_psock_progs *progs = sock_map_progs(map);
1476604326b4SDaniel Borkmann
1477604326b4SDaniel Borkmann if (!progs)
1478604326b4SDaniel Borkmann return -EOPNOTSUPP;
1479604326b4SDaniel Borkmann
1480604326b4SDaniel Borkmann switch (which) {
1481604326b4SDaniel Borkmann case BPF_SK_MSG_VERDICT:
1482748cd572SDi Zhu *pprog = &progs->msg_parser;
1483604326b4SDaniel Borkmann break;
148488759609SCong Wang #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1485604326b4SDaniel Borkmann case BPF_SK_SKB_STREAM_PARSER:
1486748cd572SDi Zhu *pprog = &progs->stream_parser;
1487604326b4SDaniel Borkmann break;
148888759609SCong Wang #endif
1489604326b4SDaniel Borkmann case BPF_SK_SKB_STREAM_VERDICT:
1490a7ba4558SCong Wang if (progs->skb_verdict)
1491a7ba4558SCong Wang return -EBUSY;
1492748cd572SDi Zhu *pprog = &progs->stream_verdict;
1493604326b4SDaniel Borkmann break;
1494a7ba4558SCong Wang case BPF_SK_SKB_VERDICT:
1495a7ba4558SCong Wang if (progs->stream_verdict)
1496a7ba4558SCong Wang return -EBUSY;
1497748cd572SDi Zhu *pprog = &progs->skb_verdict;
1498a7ba4558SCong Wang break;
1499604326b4SDaniel Borkmann default:
1500604326b4SDaniel Borkmann return -EOPNOTSUPP;
1501604326b4SDaniel Borkmann }
1502604326b4SDaniel Borkmann
1503748cd572SDi Zhu return 0;
1504748cd572SDi Zhu }
1505748cd572SDi Zhu
sock_map_prog_update(struct bpf_map * map,struct bpf_prog * prog,struct bpf_prog * old,u32 which)1506748cd572SDi Zhu static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1507748cd572SDi Zhu struct bpf_prog *old, u32 which)
1508748cd572SDi Zhu {
1509748cd572SDi Zhu struct bpf_prog **pprog;
1510748cd572SDi Zhu int ret;
1511748cd572SDi Zhu
1512748cd572SDi Zhu ret = sock_map_prog_lookup(map, &pprog, which);
1513748cd572SDi Zhu if (ret)
1514748cd572SDi Zhu return ret;
1515748cd572SDi Zhu
1516bb0de313SLorenz Bauer if (old)
1517bb0de313SLorenz Bauer return psock_replace_prog(pprog, prog, old);
1518bb0de313SLorenz Bauer
1519bb0de313SLorenz Bauer psock_set_prog(pprog, prog);
1520604326b4SDaniel Borkmann return 0;
1521604326b4SDaniel Borkmann }
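
/* Illustrative userspace sketch (not part of this file): the update path
 * above is driven by BPF_PROG_ATTACH with the map fd as the target, e.g.
 * attaching a stream verdict program through libbpf. prog_fd and map_fd are
 * assumed to come from earlier bpf_prog_load()/bpf_map_create() calls.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static int attach_verdict(int prog_fd, int map_fd)
{
	int err;

	err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (err)
		perror("bpf_prog_attach");
	return err;
}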
1522604326b4SDaniel Borkmann
sock_map_bpf_prog_query(const union bpf_attr * attr,union bpf_attr __user * uattr)1523748cd572SDi Zhu int sock_map_bpf_prog_query(const union bpf_attr *attr,
1524748cd572SDi Zhu union bpf_attr __user *uattr)
1525748cd572SDi Zhu {
1526748cd572SDi Zhu __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1527748cd572SDi Zhu u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1528748cd572SDi Zhu struct bpf_prog **pprog;
1529748cd572SDi Zhu struct bpf_prog *prog;
1530748cd572SDi Zhu struct bpf_map *map;
1531748cd572SDi Zhu struct fd f;
1532748cd572SDi Zhu u32 id = 0;
1533748cd572SDi Zhu int ret;
1534748cd572SDi Zhu
1535748cd572SDi Zhu if (attr->query.query_flags)
1536748cd572SDi Zhu return -EINVAL;
1537748cd572SDi Zhu
1538748cd572SDi Zhu f = fdget(ufd);
1539748cd572SDi Zhu map = __bpf_map_get(f);
1540748cd572SDi Zhu if (IS_ERR(map))
1541748cd572SDi Zhu return PTR_ERR(map);
1542748cd572SDi Zhu
1543748cd572SDi Zhu rcu_read_lock();
1544748cd572SDi Zhu
1545748cd572SDi Zhu ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1546748cd572SDi Zhu if (ret)
1547748cd572SDi Zhu goto end;
1548748cd572SDi Zhu
1549748cd572SDi Zhu prog = *pprog;
1550748cd572SDi Zhu prog_cnt = !prog ? 0 : 1;
1551748cd572SDi Zhu
1552748cd572SDi Zhu if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1553748cd572SDi Zhu goto end;
1554748cd572SDi Zhu
1555748cd572SDi Zhu	/* we do not hold a refcnt on the prog; it may be released
1556748cd572SDi Zhu	 * asynchronously, in which case its id reads as 0.
1557748cd572SDi Zhu	 */
1558748cd572SDi Zhu id = data_race(prog->aux->id);
1559748cd572SDi Zhu if (id == 0)
1560748cd572SDi Zhu prog_cnt = 0;
1561748cd572SDi Zhu
1562748cd572SDi Zhu end:
1563748cd572SDi Zhu rcu_read_unlock();
1564748cd572SDi Zhu
1565748cd572SDi Zhu if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1566748cd572SDi Zhu (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1567748cd572SDi Zhu copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1568748cd572SDi Zhu ret = -EFAULT;
1569748cd572SDi Zhu
1570748cd572SDi Zhu fdput(f);
1571748cd572SDi Zhu return ret;
1572748cd572SDi Zhu }
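
/* Illustrative userspace sketch (not part of this file): querying the single
 * program attached to a sockmap/sockhash with bpf_prog_query(), mirroring
 * sock_map_bpf_prog_query() above. map_fd is an assumed sockhash/sockmap fd.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static void query_verdict(int map_fd)
{
	__u32 prog_id = 0, prog_cnt = 1, attach_flags = 0;

	if (bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
			   &attach_flags, &prog_id, &prog_cnt))
		perror("bpf_prog_query");
	else
		printf("%u program(s) attached, id %u\n", prog_cnt, prog_id);
}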
1573748cd572SDi Zhu
sock_map_unlink(struct sock * sk,struct sk_psock_link * link)1574f747632bSLorenz Bauer static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1575604326b4SDaniel Borkmann {
1576604326b4SDaniel Borkmann switch (link->map->map_type) {
1577604326b4SDaniel Borkmann case BPF_MAP_TYPE_SOCKMAP:
1578604326b4SDaniel Borkmann return sock_map_delete_from_link(link->map, sk,
1579604326b4SDaniel Borkmann link->link_raw);
1580604326b4SDaniel Borkmann case BPF_MAP_TYPE_SOCKHASH:
1581604326b4SDaniel Borkmann return sock_hash_delete_from_link(link->map, sk,
1582604326b4SDaniel Borkmann link->link_raw);
1583604326b4SDaniel Borkmann default:
1584604326b4SDaniel Borkmann break;
1585604326b4SDaniel Borkmann }
1586604326b4SDaniel Borkmann }
1587f747632bSLorenz Bauer
sock_map_remove_links(struct sock * sk,struct sk_psock * psock)1588f747632bSLorenz Bauer static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1589f747632bSLorenz Bauer {
1590f747632bSLorenz Bauer struct sk_psock_link *link;
1591f747632bSLorenz Bauer
1592f747632bSLorenz Bauer while ((link = sk_psock_link_pop(psock))) {
1593f747632bSLorenz Bauer sock_map_unlink(sk, link);
1594f747632bSLorenz Bauer sk_psock_free_link(link);
1595f747632bSLorenz Bauer }
1596f747632bSLorenz Bauer }
1597f747632bSLorenz Bauer
sock_map_unhash(struct sock * sk)1598f747632bSLorenz Bauer void sock_map_unhash(struct sock *sk)
1599f747632bSLorenz Bauer {
1600f747632bSLorenz Bauer void (*saved_unhash)(struct sock *sk);
1601f747632bSLorenz Bauer struct sk_psock *psock;
1602f747632bSLorenz Bauer
1603f747632bSLorenz Bauer rcu_read_lock();
1604f747632bSLorenz Bauer psock = sk_psock(sk);
1605f747632bSLorenz Bauer if (unlikely(!psock)) {
1606f747632bSLorenz Bauer rcu_read_unlock();
16075b4a79baSJakub Sitnicki saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
16085b4a79baSJakub Sitnicki } else {
1609f747632bSLorenz Bauer saved_unhash = psock->saved_unhash;
1610f747632bSLorenz Bauer sock_map_remove_links(sk, psock);
1611f747632bSLorenz Bauer rcu_read_unlock();
16125b4a79baSJakub Sitnicki }
16135b4a79baSJakub Sitnicki if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
16145b4a79baSJakub Sitnicki return;
16155b4a79baSJakub Sitnicki if (saved_unhash)
1616f747632bSLorenz Bauer saved_unhash(sk);
1617f747632bSLorenz Bauer }
161894531cfcSJiang Wang EXPORT_SYMBOL_GPL(sock_map_unhash);
1619f747632bSLorenz Bauer
sock_map_destroy(struct sock * sk)1620d8616ee2SWang Yufen void sock_map_destroy(struct sock *sk)
1621d8616ee2SWang Yufen {
1622d8616ee2SWang Yufen void (*saved_destroy)(struct sock *sk);
1623d8616ee2SWang Yufen struct sk_psock *psock;
1624d8616ee2SWang Yufen
1625d8616ee2SWang Yufen rcu_read_lock();
1626d8616ee2SWang Yufen psock = sk_psock_get(sk);
1627d8616ee2SWang Yufen if (unlikely(!psock)) {
1628d8616ee2SWang Yufen rcu_read_unlock();
16295b4a79baSJakub Sitnicki saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
16305b4a79baSJakub Sitnicki } else {
1631d8616ee2SWang Yufen saved_destroy = psock->saved_destroy;
1632d8616ee2SWang Yufen sock_map_remove_links(sk, psock);
1633d8616ee2SWang Yufen rcu_read_unlock();
16348bbabb3fSCong Wang sk_psock_stop(psock);
1635d8616ee2SWang Yufen sk_psock_put(sk, psock);
16365b4a79baSJakub Sitnicki }
16375b4a79baSJakub Sitnicki if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
16385b4a79baSJakub Sitnicki return;
16395b4a79baSJakub Sitnicki if (saved_destroy)
1640d8616ee2SWang Yufen saved_destroy(sk);
1641d8616ee2SWang Yufen }
1642d8616ee2SWang Yufen EXPORT_SYMBOL_GPL(sock_map_destroy);
1643d8616ee2SWang Yufen
sock_map_close(struct sock * sk,long timeout)1644f747632bSLorenz Bauer void sock_map_close(struct sock *sk, long timeout)
1645f747632bSLorenz Bauer {
1646f747632bSLorenz Bauer void (*saved_close)(struct sock *sk, long timeout);
1647f747632bSLorenz Bauer struct sk_psock *psock;
1648f747632bSLorenz Bauer
1649f747632bSLorenz Bauer lock_sock(sk);
1650f747632bSLorenz Bauer rcu_read_lock();
1651e9464284SThadeu Lima de Souza Cascardo psock = sk_psock(sk);
1652e9464284SThadeu Lima de Souza Cascardo if (likely(psock)) {
1653f747632bSLorenz Bauer saved_close = psock->saved_close;
1654f747632bSLorenz Bauer sock_map_remove_links(sk, psock);
1655e9464284SThadeu Lima de Souza Cascardo psock = sk_psock_get(sk);
1656e9464284SThadeu Lima de Souza Cascardo if (unlikely(!psock))
1657e9464284SThadeu Lima de Souza Cascardo goto no_psock;
1658f747632bSLorenz Bauer rcu_read_unlock();
16598bbabb3fSCong Wang sk_psock_stop(psock);
1660f747632bSLorenz Bauer release_sock(sk);
166129173d07SJohn Fastabend cancel_delayed_work_sync(&psock->work);
16628bbabb3fSCong Wang sk_psock_put(sk, psock);
1663e9464284SThadeu Lima de Souza Cascardo } else {
1664e9464284SThadeu Lima de Souza Cascardo saved_close = READ_ONCE(sk->sk_prot)->close;
1665e9464284SThadeu Lima de Souza Cascardo no_psock:
1666e9464284SThadeu Lima de Souza Cascardo rcu_read_unlock();
1667e9464284SThadeu Lima de Souza Cascardo release_sock(sk);
16685b4a79baSJakub Sitnicki }
166929173d07SJohn Fastabend
16705b4a79baSJakub Sitnicki	/* Make sure we do not recurse; that would be a bug.
16715b4a79baSJakub Sitnicki	 * Leak the socket instead of crashing on a stack overflow.
16725b4a79baSJakub Sitnicki	 */
16735b4a79baSJakub Sitnicki if (WARN_ON_ONCE(saved_close == sock_map_close))
16745b4a79baSJakub Sitnicki return;
1675f747632bSLorenz Bauer saved_close(sk, timeout);
1676f747632bSLorenz Bauer }
1677c6382918SCong Wang EXPORT_SYMBOL_GPL(sock_map_close);
167803653515SLorenz Bauer
sock_map_iter_attach_target(struct bpf_prog * prog,union bpf_iter_link_info * linfo,struct bpf_iter_aux_info * aux)167903653515SLorenz Bauer static int sock_map_iter_attach_target(struct bpf_prog *prog,
168003653515SLorenz Bauer union bpf_iter_link_info *linfo,
168103653515SLorenz Bauer struct bpf_iter_aux_info *aux)
168203653515SLorenz Bauer {
168303653515SLorenz Bauer struct bpf_map *map;
168403653515SLorenz Bauer int err = -EINVAL;
168503653515SLorenz Bauer
168603653515SLorenz Bauer if (!linfo->map.map_fd)
168703653515SLorenz Bauer return -EBADF;
168803653515SLorenz Bauer
168903653515SLorenz Bauer map = bpf_map_get_with_uref(linfo->map.map_fd);
169003653515SLorenz Bauer if (IS_ERR(map))
169103653515SLorenz Bauer return PTR_ERR(map);
169203653515SLorenz Bauer
169303653515SLorenz Bauer if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
169403653515SLorenz Bauer map->map_type != BPF_MAP_TYPE_SOCKHASH)
169503653515SLorenz Bauer goto put_map;
169603653515SLorenz Bauer
169703653515SLorenz Bauer if (prog->aux->max_rdonly_access > map->key_size) {
169803653515SLorenz Bauer err = -EACCES;
169903653515SLorenz Bauer goto put_map;
170003653515SLorenz Bauer }
170103653515SLorenz Bauer
170203653515SLorenz Bauer aux->map = map;
170303653515SLorenz Bauer return 0;
170403653515SLorenz Bauer
170503653515SLorenz Bauer put_map:
170603653515SLorenz Bauer bpf_map_put_with_uref(map);
170703653515SLorenz Bauer return err;
170803653515SLorenz Bauer }
170903653515SLorenz Bauer
sock_map_iter_detach_target(struct bpf_iter_aux_info * aux)171003653515SLorenz Bauer static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
171103653515SLorenz Bauer {
171203653515SLorenz Bauer bpf_map_put_with_uref(aux->map);
171303653515SLorenz Bauer }
171403653515SLorenz Bauer
171503653515SLorenz Bauer static struct bpf_iter_reg sock_map_iter_reg = {
171603653515SLorenz Bauer .target = "sockmap",
171703653515SLorenz Bauer .attach_target = sock_map_iter_attach_target,
171803653515SLorenz Bauer .detach_target = sock_map_iter_detach_target,
171903653515SLorenz Bauer .show_fdinfo = bpf_iter_map_show_fdinfo,
172003653515SLorenz Bauer .fill_link_info = bpf_iter_map_fill_link_info,
172103653515SLorenz Bauer .ctx_arg_info_size = 2,
172203653515SLorenz Bauer .ctx_arg_info = {
172303653515SLorenz Bauer { offsetof(struct bpf_iter__sockmap, key),
172420b2aff4SHao Luo PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
172503653515SLorenz Bauer { offsetof(struct bpf_iter__sockmap, sk),
172603653515SLorenz Bauer PTR_TO_BTF_ID_OR_NULL },
172703653515SLorenz Bauer },
172803653515SLorenz Bauer };
172903653515SLorenz Bauer
bpf_sockmap_iter_init(void)173003653515SLorenz Bauer static int __init bpf_sockmap_iter_init(void)
173103653515SLorenz Bauer {
173203653515SLorenz Bauer sock_map_iter_reg.ctx_arg_info[1].btf_id =
173303653515SLorenz Bauer btf_sock_ids[BTF_SOCK_TYPE_SOCK];
173403653515SLorenz Bauer return bpf_iter_reg_target(&sock_map_iter_reg);
173503653515SLorenz Bauer }
173603653515SLorenz Bauer late_initcall(bpf_sockmap_iter_init);
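
/* Illustrative BPF-side sketch (not part of this file): a minimal "sockmap"
 * iterator program matching the ctx_arg_info registered above; it only
 * counts elements. The vmlinux.h include and the userspace attach step (an
 * iter link created with the target map fd in bpf_iter_link_info) are
 * assumptions made for the example.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

int elems;

SEC("iter/sockmap")
int count_elems(struct bpf_iter__sockmap *ctx)
{
	if (ctx->sk)	/* NULL sk/key marks the end-of-iteration call */
		elems++;
	return 0;
}

char _license[] SEC("license") = "GPL";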
1737