/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)
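
/* Indexing note (a sketch, not normative): the SG ring below holds at most
 * MAX_MSG_FRAGS live elements, but indices run over NR_MSG_FRAG_IDS
 * (MAX_MSG_FRAGS + 1) positions so that start == end can unambiguously mean
 * "empty" while a full ring still leaves one index unused. With the common
 * MAX_SKB_FRAGS value of 17 (configuration dependent), that gives 18 frag IDs.
 */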

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};
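
/* These internal actions roughly mirror the verdicts returned by sockmap
 * BPF programs (drop, pass, redirect); __SK_NONE means no verdict has been
 * recorded yet. See the eval field in struct sk_psock below.
 */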

struct sk_msg_sg {
	u32			start;
	u32			curr;
	u32			end;
	u32			size;
	u32			copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and end sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain trailer SG entries after the message.
	 */
	struct scatterlist	data[MAX_MSG_FRAGS + 2];
};
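
/* Rough field summary (not authoritative): start/curr/end index the data[]
 * ring, size is the total byte count across the live elements, copybreak
 * records how far the current element has been filled by a partial copy,
 * and a set bit in the copy bitmap marks an element whose memory must not
 * be handed to BPF directly (see sk_msg_compute_data_pointers(), which then
 * leaves data/data_end NULL).
 */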

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg	sg;
	void			*data;
	void			*data_end;
	u32			apply_bytes;
	u32			cork_bytes;
	u32			flags;
	struct sk_buff		*skb;
	struct sock		*sk_redir;
	struct sock		*sk;
	struct list_head	list;
};
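
/* data/data_end form the window a BPF msg program can read directly; they
 * are (re)derived from the first live SG element by
 * sk_msg_compute_data_pointers(). apply_bytes and cork_bytes are the
 * counters behind the bpf_msg_apply_bytes()/bpf_msg_cork_bytes() helpers:
 * roughly, how many bytes the last verdict still covers and how many bytes
 * to accumulate before running the program again (a sketch, not a spec).
 */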

struct sk_psock_progs {
	struct bpf_prog		*msg_parser;
	struct bpf_prog		*stream_parser;
	struct bpf_prog		*stream_verdict;
	struct bpf_prog		*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head	list;
	struct bpf_map		*map;
	void			*link_raw;
};

struct sk_psock_work_state {
	u32			len;
	u32			off;
};

struct sk_psock {
	struct sock		*sk;
	struct sock		*sk_redir;
	u32			apply_bytes;
	u32			cork_bytes;
	u32			eval;
	bool			redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg		*cork;
	struct sk_psock_progs	progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser	strp;
	u32			copied_seq;
	u32			ingress_bytes;
#endif
	struct sk_buff_head	ingress_skb;
	struct list_head	ingress_msg;
	spinlock_t		ingress_lock;
	unsigned long		state;
	struct list_head	link;
	spinlock_t		link_lock;
	refcount_t		refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto		*sk_proto;
	struct mutex		work_mutex;
	struct sk_psock_work_state work_state;
	struct delayed_work	work;
	struct sock		*sk_pair;
	struct rcu_work		rwork;
};
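
/* Lifetime sketch (hedged, not a spec): a psock is created and attached via
 * sk_psock_init() and found again with sk_psock() under RCU; references are
 * taken with sk_psock_get() and dropped with sk_psock_put(), the final put
 * ending in sk_psock_drop(). The saved_* callbacks keep the socket's
 * original handlers so they can be restored when the psock is torn down,
 * and ingress_msg/ingress_skb are the queues feeding redirected-to-ingress
 * data back to the local receiver.
 */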

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}
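
/* Worked example of the wraparound above: assuming NR_MSG_FRAG_IDS == 18,
 * sk_msg_iter_dist(15, 2) == 2 + (18 - 15) == 5, i.e. five elements are in
 * use when the ring has wrapped past the end of data[].
 */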

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.size -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}
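
/* Note: only the element at sg.start is exposed through data/data_end, and
 * only when its copy bit is clear; a BPF program needing more must pull
 * data in first (e.g. via the bpf_msg_pull_data() helper).
 */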

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}
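
/* Hypothetical usage sketch of the helper above (error handling omitted,
 * not lifted from any in-tree caller):
 *
 *	if (!sk_msg_full(msg)) {
 *		sk_msg_page_add(msg, page, PAGE_SIZE, 0);
 *		sk_msg_compute_data_pointers(msg);
 *	}
 */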

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline bool sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	bool ret;

	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		list_add_tail(&msg->list, &psock->ingress_msg);
		ret = true;
	} else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
		ret = false;
	}
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}
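
/* The TX_ENABLED check above means a message queued against a psock that is
 * already being torn down is freed immediately instead of being left on
 * ingress_msg; callers can use the return value to tell the two cases apart.
 */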

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}
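
/* Typical reference pattern (a sketch, not lifted from a specific caller):
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (psock) {
 *		... operate on psock ...
 *		sk_psock_put(sk, psock);
 *	}
 */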

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	read_lock_bh(&sk->sk_callback_lock);
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}
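
/* psock_set_prog() atomically swaps in the new program and releases the
 * reference on whatever was installed before; passing NULL therefore acts
 * as a detach, which is exactly how psock_progs_drop() below uses it.
 */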

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK	~(BPF_F_INGRESS | BPF_F_STRPARSER)
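
/* skb->_sk_redir packs the redirect target sock pointer together with the
 * two flag bits above in its low bits, relying on struct sock pointers
 * being at least 4-byte aligned; the helpers below mask the flags back out
 * before using the pointer.
 */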

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */