/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	bool				redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct delayed_work		work;
	struct sock			*sk_pair;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);
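
/* Example (illustrative sketch, not part of the kernel API): callers pair
 * sk_msg_init() + sk_msg_alloc() with sk_msg_free(), which walks the
 * scatterlist, drops the page references and uncharges the socket memory
 * accounting. The helper name and the elem_first_coalesce value of 0 (no
 * prior element to coalesce with on a fresh msg) are assumptions:
 *
 *	static int example_build_msg(struct sock *sk, struct sk_msg *msg,
 *				     struct iov_iter *from, u32 bytes)
 *	{
 *		int err;
 *
 *		sk_msg_init(msg);
 *		err = sk_msg_alloc(sk, msg, bytes, 0);
 *		if (err)
 *			return err;
 *		err = sk_msg_memcopy_from_iter(sk, from, msg, bytes);
 *		if (err)
 *			sk_msg_free(sk, msg);
 *		return err;
 *	}
 */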

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}
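
/* Example (illustrative sketch, not part of the kernel API): sg.start and
 * sg.end are ring indices that wrap at NR_MSG_FRAG_IDS, so walking the
 * populated elements uses sk_msg_iter_var_next() rather than a plain
 * increment. A hypothetical walker, whose result should equal sg.size:
 *
 *	static u32 example_msg_bytes(struct sk_msg *msg)
 *	{
 *		u32 i = msg->sg.start;
 *		u32 bytes = 0;
 *
 *		while (i != msg->sg.end) {
 *			bytes += sk_msg_elem(msg, i)->length;
 *			sk_msg_iter_var_next(i);
 *		}
 *		return bytes;
 *	}
 */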

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}
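
/* Example (illustrative sketch, not part of the kernel API): teardown paths
 * can drain the ingress list with sk_psock_dequeue_msg(), which takes
 * ingress_lock itself; each message is released with sk_msg_free() plus
 * kfree(), mirroring the failure path of sk_psock_queue_msg() above:
 *
 *	static void example_drain_ingress(struct sk_psock *psock)
 *	{
 *		struct sk_msg *msg;
 *
 *		while ((msg = sk_psock_dequeue_msg(psock))) {
 *			sk_msg_free(psock->sk, msg);
 *			kfree(msg);
 *		}
 *	}
 */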

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}
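
/* Example (illustrative sketch, not part of the kernel API): both helpers
 * above transfer the caller's reference on the new program and put the
 * reference held on the displaced one, so a hypothetical update path only
 * propagates the -ENOENT from a failed compare-and-exchange:
 *
 *	static int example_update_stream_verdict(struct sk_psock_progs *progs,
 *						 struct bpf_prog *prog,
 *						 struct bpf_prog *old)
 *	{
 *		if (old)
 *			return psock_replace_prog(&progs->stream_verdict,
 *						  prog, old);
 *		psock_set_prog(&progs->stream_verdict, prog);
 *		return 0;
 *	}
 */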

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK	~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */