// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		bool has_tx_ulp;

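		/* Walk the scatterlist from sg.start, pushing each element
		 * into the TCP stack (via the TLS path when a TX ULP is
		 * present) and trimming elements as bytes are accepted.
		 */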
		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp) {
			flags |= MSG_SENDPAGE_NOPOLICY;
			ret = kernel_sendpage_locked(sk,
						     page, off, size, flags);
		} else {
			ret = do_tcp_sendpages(sk, page, off, size, flags);
		}

		if (ret <= 0)
			return ret;
		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
			  u32 bytes, int flags)
{
	bool ingress = sk_msg_to_ingress(msg);
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock)) {
		sk_msg_free(sk, msg);
		return 0;
	}
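	/* A msg redirected to ingress is charged to the target socket and
	 * queued on its psock ingress queue; an egress redirect is pushed
	 * out through the target's TCP stack.
	 */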
	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
static bool tcp_bpf_stream_read(const struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}

static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
			     long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

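/* recvmsg replacement: serve data queued on the psock ingress queue
 * first, falling back to tcp_recvmsg() when the data sits on the
 * regular receive queue instead or the psock is gone.
 */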
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		}
		copied = -EAGAIN;
	}
	ret = copied;
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg);
	struct sock *sk_redir;
	u32 tosend, delta = 0;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track delta in msg size to add/subtract it on SK_DROP from
		 * returned to user copied size. This ensures user doesn't
		 * get a positive return code with msg_cut_data and SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
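	/* Redirect: hand the msg to the target socket. This socket's
	 * lock is released around the send because the redirect path
	 * takes the target socket's own lock.
	 */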
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);
		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length)
			goto more_data;
	}
	return ret;
}

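/* sendmsg replacement: copy user data into an sk_msg, honor any
 * pending cork bytes, then run the BPF msg verdict program via
 * tcp_bpf_send_verdict() to pass, redirect, or drop the data.
 */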
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal do_tcp_sendpages() flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

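/* sendpage replacement: attach the page to an sk_msg (or the pending
 * cork) and hand it to the same verdict path as sendmsg.
 */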
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE]			= *base;
	prot[TCP_BPF_BASE].unhash		= sock_map_unhash;
	prot[TCP_BPF_BASE].close		= sock_map_close;
	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].stream_memory_read	= tcp_bpf_stream_read;

	prot[TCP_BPF_TX]			= prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg		= tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage		= tcp_bpf_sendpage;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

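/* tcp_prot is built in, so the IPv4 variants can be generated once at
 * boot. The IPv6 variants are rebuilt lazily above because tcpv6_prot
 * may come from the (possibly modular) ipv6 code.
 */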
static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg  == tcp_recvmsg &&
	       ops->sendmsg  == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			WRITE_ONCE(sk->sk_prot, psock->sk_proto);
		}
		return 0;
	}

	if (inet_csk_has_ulp(sk))
		return -EINVAL;

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	WRITE_ONCE(sk->sk_prot, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	struct proto *prot = newsk->sk_prot;

	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */