Lines matching refs:msk — one entry per reference in net/mptcp/protocol.c: the source line number, the line itself, the enclosing function, and a trailing tag marking msk as an argument, local, or struct member on that line.
34 struct mptcp_sock msk; member
53 static u64 mptcp_wnd_end(const struct mptcp_sock *msk) in mptcp_wnd_end() argument
55 return READ_ONCE(msk->wnd_end); in mptcp_wnd_end()
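
Note: msk->wnd_end is updated elsewhere with WRITE_ONCE() and read here with READ_ONCE() because subflow contexts read it without holding the msk socket lock. A rough userspace analogue of that single-writer/lockless-reader pairing, using C11 relaxed atomics (illustrative only; these are not the kernel macros):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Stand-in for the WRITE_ONCE()/READ_ONCE() pairing on a u64:
     * the writer publishes whole values, concurrent readers never
     * observe a torn load. */
    static _Atomic uint64_t wnd_end;

    static void publish_wnd_end(uint64_t v)
    {
        atomic_store_explicit(&wnd_end, v, memory_order_relaxed);
    }

    static uint64_t read_wnd_end(void)
    {
        return atomic_load_explicit(&wnd_end, memory_order_relaxed);
    }
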
68 static int __mptcp_socket_create(struct mptcp_sock *msk) in __mptcp_socket_create() argument
71 struct sock *sk = (struct sock *)msk; in __mptcp_socket_create()
79 msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio; in __mptcp_socket_create()
80 WRITE_ONCE(msk->first, ssock->sk); in __mptcp_socket_create()
82 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
85 subflow->subflow_id = msk->subflow_id++; in __mptcp_socket_create()
89 mptcp_sock_graft(msk->first, sk->sk_socket); in __mptcp_socket_create()
98 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk) in __mptcp_nmpc_sk() argument
100 struct sock *sk = (struct sock *)msk; in __mptcp_nmpc_sk()
106 if (!msk->first) { in __mptcp_nmpc_sk()
107 ret = __mptcp_socket_create(msk); in __mptcp_nmpc_sk()
111 mptcp_sockopt_sync(msk, msk->first); in __mptcp_nmpc_sk()
114 return msk->first; in __mptcp_nmpc_sk()
161 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, in mptcp_ooo_try_coalesce() argument
167 return mptcp_try_coalesce((struct sock *)msk, to, from); in mptcp_ooo_try_coalesce()
179 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_rmem_uncharge() local
183 reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); in mptcp_rmem_uncharge()
212 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) in mptcp_data_queue_ofo() argument
214 struct sock *sk = (struct sock *)msk; in mptcp_data_queue_ofo()
221 max_seq = atomic64_read(&msk->rcv_wnd_sent); in mptcp_data_queue_ofo()
223 pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, in mptcp_data_queue_ofo()
224 RB_EMPTY_ROOT(&msk->out_of_order_queue)); in mptcp_data_queue_ofo()
230 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent)); in mptcp_data_queue_ofo()
235 p = &msk->out_of_order_queue.rb_node; in mptcp_data_queue_ofo()
237 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { in mptcp_data_queue_ofo()
239 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
240 msk->ooo_last_skb = skb; in mptcp_data_queue_ofo()
247 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { in mptcp_data_queue_ofo()
254 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { in mptcp_data_queue_ofo()
256 parent = &msk->ooo_last_skb->rbnode; in mptcp_data_queue_ofo()
288 &msk->out_of_order_queue); in mptcp_data_queue_ofo()
293 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { in mptcp_data_queue_ofo()
303 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
310 rb_erase(&skb1->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
316 msk->ooo_last_skb = skb; in mptcp_data_queue_ofo()
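
Note: every ordering test in the out-of-order path above (before64(), after64(), the max_seq bound) is a wraparound-safe modular comparison of 64-bit data sequence numbers. The helpers live in protocol.h and are small enough to restate:

    #include <stdbool.h>
    #include <stdint.h>

    /* seq1 is "before" seq2 iff the signed difference is negative;
     * this stays correct across u64 wraparound. Matches the kernel's
     * before64()/after64() helpers. */
    static inline bool before64(uint64_t seq1, uint64_t seq2)
    {
        return (int64_t)(seq1 - seq2) < 0;
    }

    #define after64(seq2, seq1) before64(seq1, seq2)
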
325 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_rmem_schedule() local
328 if (size <= msk->rmem_fwd_alloc) in mptcp_rmem_schedule()
331 size -= msk->rmem_fwd_alloc; in mptcp_rmem_schedule()
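
Note: mptcp_rmem_schedule() drains a per-msk forward-allocation cache (rmem_fwd_alloc) before touching the shared memory-accounting counters. A simplified userspace sketch of the two-level scheme; reserve_from_global() is a hypothetical stub standing in for the kernel's global accounting path, and the actual charge is applied separately (as in mptcp_rmem_charge()):

    #include <stdbool.h>

    /* Hypothetical stub for the kernel's global memory accounting. */
    static bool reserve_from_global(int amount)
    {
        return amount >= 0;
    }

    /* Satisfy the request from the msk-private pool when possible and
     * fall back to the (contended) shared counters only on shortfall. */
    static bool rmem_schedule(int *rmem_fwd_alloc, int size)
    {
        if (size <= *rmem_fwd_alloc)
            return true;

        size -= *rmem_fwd_alloc;
        if (!reserve_from_global(size))
            return false;

        *rmem_fwd_alloc += size;
        return true;
    }
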
341 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, in __mptcp_move_skb() argument
346 struct sock *sk = (struct sock *)msk; in __mptcp_move_skb()
372 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { in __mptcp_move_skb()
374 msk->bytes_received += copy_len; in __mptcp_move_skb()
375 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); in __mptcp_move_skb()
383 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { in __mptcp_move_skb()
384 mptcp_data_queue_ofo(msk, skb); in __mptcp_move_skb()
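
Note: the three-way test in __mptcp_move_skb() above (lines 372, 383) routes each incoming mapping against the msk-level cumulative ack. Simplified classification, with the final branch covering data that was already delivered:

    #include <stdint.h>

    enum skb_fate { QUEUE_IN_ORDER, QUEUE_OUT_OF_ORDER, DROP_STALE };

    /* In-order data is appended to the receive queue and advances
     * ack_seq; future data is parked in the out-of-order rbtree;
     * mappings starting behind ack_seq have already been delivered. */
    static enum skb_fate classify(uint64_t map_seq, uint64_t ack_seq)
    {
        if (map_seq == ack_seq)
            return QUEUE_IN_ORDER;
        if ((int64_t)(map_seq - ack_seq) > 0) /* after64() */
            return QUEUE_OUT_OF_ORDER;
        return DROP_STALE;
    }
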
420 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin_ack() local
424 msk->write_seq == READ_ONCE(msk->snd_una); in mptcp_pending_data_fin_ack()
429 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin_ack() local
433 WRITE_ONCE(msk->snd_data_fin_enable, 0); in mptcp_check_data_fin_ack()
451 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin() local
453 if (READ_ONCE(msk->rcv_data_fin) && in mptcp_pending_data_fin()
456 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); in mptcp_pending_data_fin()
458 if (msk->ack_seq == rcv_data_fin_seq) { in mptcp_pending_data_fin()
524 static void mptcp_send_ack(struct mptcp_sock *msk) in mptcp_send_ack() argument
528 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
555 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) in mptcp_cleanup_rbuf() argument
557 int old_space = READ_ONCE(msk->old_wspace); in mptcp_cleanup_rbuf()
559 struct sock *sk = (struct sock *)msk; in mptcp_cleanup_rbuf()
566 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
576 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin() local
594 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); in mptcp_check_data_fin()
595 WRITE_ONCE(msk->rcv_data_fin, 0); in mptcp_check_data_fin()
617 if (!__mptcp_check_fallback(msk)) in mptcp_check_data_fin()
618 mptcp_send_ack(msk); in mptcp_check_data_fin()
624 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) in mptcp_dss_corruption() argument
626 if (READ_ONCE(msk->allow_infinite_fallback)) { in mptcp_dss_corruption()
636 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, in __mptcp_move_skbs_from_subflow() argument
641 struct sock *sk = (struct sock *)msk; in __mptcp_move_skbs_from_subflow()
659 pr_debug("msk=%p ssk=%p\n", msk, ssk); in __mptcp_move_skbs_from_subflow()
682 if (__mptcp_check_fallback(msk)) { in __mptcp_move_skbs_from_subflow()
704 if (__mptcp_move_skb(msk, ssk, skb, offset, len)) in __mptcp_move_skbs_from_subflow()
710 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
715 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
735 static bool __mptcp_ofo_queue(struct mptcp_sock *msk) in __mptcp_ofo_queue() argument
737 struct sock *sk = (struct sock *)msk; in __mptcp_ofo_queue()
743 p = rb_first(&msk->out_of_order_queue); in __mptcp_ofo_queue()
744 pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); in __mptcp_ofo_queue()
747 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) in __mptcp_ofo_queue()
751 rb_erase(&skb->rbnode, &msk->out_of_order_queue); in __mptcp_ofo_queue()
754 msk->ack_seq))) { in __mptcp_ofo_queue()
762 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { in __mptcp_ofo_queue()
763 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; in __mptcp_ofo_queue()
767 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, in __mptcp_ofo_queue()
773 msk->bytes_received += end_seq - msk->ack_seq; in __mptcp_ofo_queue()
774 msk->ack_seq = end_seq; in __mptcp_ofo_queue()
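
Note: when __mptcp_ofo_queue() promotes an skb that partially overlaps already-delivered data (lines 762-774), it skips the stale head by advancing the skb's offset and map_seq by delta, then accounts only the genuinely new bytes. Worked example of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* skb covers [100, 160) but ack_seq already reached 130:
         * skip the 30 delivered bytes, deliver [130, 160). */
        uint64_t map_seq = 100, end_seq = 160, ack_seq = 130;
        int delta = (int)(ack_seq - map_seq);   /* 30 */
        uint64_t new_bytes = end_seq - ack_seq; /* 30 */

        printf("offset += %d, bytes_received += %llu, ack_seq = %llu\n",
               delta, (unsigned long long)new_bytes,
               (unsigned long long)end_seq);
        return 0;
    }
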
813 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_error_report() local
815 mptcp_for_each_subflow(msk, subflow) in __mptcp_error_report()
823 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
825 struct sock *sk = (struct sock *)msk; in move_skbs_to_msk()
828 __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in move_skbs_to_msk()
829 __mptcp_ofo_queue(msk); in move_skbs_to_msk()
834 __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags); in move_skbs_to_msk()
850 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_data_ready() local
871 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) in mptcp_data_ready()
876 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) in mptcp_subflow_joined() argument
878 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); in mptcp_subflow_joined()
879 WRITE_ONCE(msk->allow_infinite_fallback, false); in mptcp_subflow_joined()
880 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_subflow_joined()
883 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) in __mptcp_finish_join() argument
885 struct sock *sk = (struct sock *)msk; in __mptcp_finish_join()
896 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; in __mptcp_finish_join()
897 mptcp_sockopt_sync_locked(msk, ssk); in __mptcp_finish_join()
898 mptcp_subflow_joined(msk, ssk); in __mptcp_finish_join()
907 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_flush_join_list() local
913 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
914 if (!__mptcp_finish_join(msk, ssk)) in __mptcp_flush_join_list()
951 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk) in mptcp_subflow_recv_lookup() argument
955 msk_owned_by_me(msk); in mptcp_subflow_recv_lookup()
957 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_recv_lookup()
984 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, in mptcp_frag_can_collapse_to() argument
991 df->data_seq + df->data_len == msk->write_seq; in mptcp_frag_can_collapse_to()
1011 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clean_una() local
1015 snd_una = msk->snd_una; in __mptcp_clean_una()
1016 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { in __mptcp_clean_una()
1020 if (unlikely(dfrag == msk->first_pending)) { in __mptcp_clean_una()
1022 if (WARN_ON_ONCE(!msk->recovery)) in __mptcp_clean_una()
1025 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __mptcp_clean_una()
1037 if (WARN_ON_ONCE(!msk->recovery)) in __mptcp_clean_una()
1053 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) in __mptcp_clean_una()
1054 msk->recovery = false; in __mptcp_clean_una()
1057 if (snd_una == READ_ONCE(msk->snd_nxt) && in __mptcp_clean_una()
1058 snd_una == READ_ONCE(msk->write_seq)) { in __mptcp_clean_una()
1059 if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) in __mptcp_clean_una()
1084 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_enter_memory_pressure() local
1087 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1113 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, in mptcp_carve_data_frag() argument
1121 dfrag->data_seq = msk->write_seq; in mptcp_carve_data_frag()
1139 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, in mptcp_check_allowed_size() argument
1142 u64 window_end = mptcp_wnd_end(msk); in mptcp_check_allowed_size()
1145 if (__mptcp_check_fallback(msk)) in mptcp_check_allowed_size()
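
Note: mptcp_check_allowed_size() caps each transmit fragment at the shared MPTCP-level window end, and fallback sockets skip the check entirely (line 1145). A sketch of the clamp, ignoring the kernel's subflow snd_wnd sync and zero-window probe handling:

    #include <stdint.h>

    static unsigned int clamp_to_window(uint64_t window_end,
                                        uint64_t data_seq,
                                        unsigned int avail_size)
    {
        /* nothing may be sent at or past the window end */
        if ((int64_t)(data_seq - window_end) >= 0)
            return 0;
        if (avail_size > window_end - data_seq)
            avail_size = (unsigned int)(window_end - data_seq);
        return avail_size;
    }
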
1224 static void mptcp_update_infinite_map(struct mptcp_sock *msk, in mptcp_update_infinite_map() argument
1236 pr_fallback(msk); in mptcp_update_infinite_map()
1248 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_frag() local
1258 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1308 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); in mptcp_sendmsg_frag()
1310 u64 snd_una = READ_ONCE(msk->snd_una); in mptcp_sendmsg_frag()
1312 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { in mptcp_sendmsg_frag()
1365 if (READ_ONCE(msk->csum_enabled)) in mptcp_sendmsg_frag()
1371 if (READ_ONCE(msk->csum_enabled)) in mptcp_sendmsg_frag()
1374 mptcp_update_infinite_map(msk, ssk, mpext); in mptcp_sendmsg_frag()
1421 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) in mptcp_subflow_get_send() argument
1425 struct sock *sk = (struct sock *)msk; in mptcp_subflow_get_send()
1438 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1484 burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); in mptcp_subflow_get_send()
1493 msk->snd_burst = burst; in mptcp_subflow_get_send()
1503 static void mptcp_update_post_push(struct mptcp_sock *msk, in mptcp_update_post_push() argument
1511 msk->snd_burst -= sent; in mptcp_update_post_push()
1524 if (likely(after64(snd_nxt_new, msk->snd_nxt))) { in mptcp_update_post_push()
1525 msk->bytes_sent += snd_nxt_new - msk->snd_nxt; in mptcp_update_post_push()
1526 msk->snd_nxt = snd_nxt_new; in mptcp_update_post_push()
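
Note: mptcp_update_post_push() advances snd_nxt monotonically (lines 1524-1526): a push that ends at or behind the current snd_nxt is a post-failover retransmission and must not inflate bytes_sent. Condensed:

    #include <stdint.h>

    struct tx_state { uint64_t snd_nxt, bytes_sent; };

    /* Only count bytes past the previous high-water mark. */
    static void update_post_push(struct tx_state *tx, uint64_t snd_nxt_new)
    {
        if ((int64_t)(snd_nxt_new - tx->snd_nxt) > 0) { /* after64() */
            tx->bytes_sent += snd_nxt_new - tx->snd_nxt;
            tx->snd_nxt = snd_nxt_new;
        }
    }
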
1542 struct mptcp_sock *msk = mptcp_sk(sk); in __subflow_push_pending() local
1563 mptcp_update_post_push(msk, dfrag, ret); in __subflow_push_pending()
1565 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __subflow_push_pending()
1567 if (msk->snd_burst <= 0 || in __subflow_push_pending()
1584 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_push_pending() local
1595 if (mptcp_sched_get_send(msk)) in __mptcp_push_pending()
1600 mptcp_for_each_subflow(msk, subflow) { in __mptcp_push_pending()
1648 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_subflow_push_pending() local
1674 if (mptcp_sched_get_send(msk)) in __mptcp_subflow_push_pending()
1685 mptcp_for_each_subflow(msk, subflow) { in __mptcp_subflow_push_pending()
1707 if (msk->snd_data_fin_enable && in __mptcp_subflow_push_pending()
1708 msk->snd_nxt + 1 == msk->write_seq) in __mptcp_subflow_push_pending()
1728 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_fastopen() local
1739 ssk = __mptcp_nmpc_sk(msk); in mptcp_sendmsg_fastopen()
1743 if (!msk->first) in mptcp_sendmsg_fastopen()
1746 ssk = msk->first; in mptcp_sendmsg_fastopen()
1750 msk->fastopening = 1; in mptcp_sendmsg_fastopen()
1752 msk->fastopening = 0; in mptcp_sendmsg_fastopen()
1784 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg() local
1831 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); in mptcp_sendmsg()
1839 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); in mptcp_sendmsg()
1867 WRITE_ONCE(msk->write_seq, msk->write_seq + psize); in mptcp_sendmsg()
1875 list_add_tail(&dfrag->list, &msk->rtx_queue); in mptcp_sendmsg()
1876 if (!msk->first_pending) in mptcp_sendmsg()
1877 WRITE_ONCE(msk->first_pending, dfrag); in mptcp_sendmsg()
1879 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, in mptcp_sendmsg()
1908 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
1910 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, in __mptcp_recvmsg_mskq() argument
1919 skb_queue_walk_safe(&msk->receive_queue, skb, tmp) { in __mptcp_recvmsg_mskq()
1945 msk->bytes_consumed += count; in __mptcp_recvmsg_mskq()
1953 WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); in __mptcp_recvmsg_mskq()
1954 __skb_unlink(skb, &msk->receive_queue); in __mptcp_recvmsg_mskq()
1956 msk->bytes_consumed += count; in __mptcp_recvmsg_mskq()
1963 mptcp_rcv_space_adjust(msk, copied); in __mptcp_recvmsg_mskq()
1971 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) in mptcp_rcv_space_adjust() argument
1974 struct sock *sk = (struct sock *)msk; in mptcp_rcv_space_adjust()
1979 msk_owned_by_me(msk); in mptcp_rcv_space_adjust()
1984 if (!msk->rcvspace_init) in mptcp_rcv_space_adjust()
1985 mptcp_rcv_space_init(msk, msk->first); in mptcp_rcv_space_adjust()
1987 msk->rcvq_space.copied += copied; in mptcp_rcv_space_adjust()
1990 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); in mptcp_rcv_space_adjust()
1992 rtt_us = msk->rcvq_space.rtt_us; in mptcp_rcv_space_adjust()
1997 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2012 msk->rcvq_space.rtt_us = rtt_us; in mptcp_rcv_space_adjust()
2013 msk->scaling_ratio = scaling_ratio; in mptcp_rcv_space_adjust()
2017 if (msk->rcvq_space.copied <= msk->rcvq_space.space) in mptcp_rcv_space_adjust()
2025 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; in mptcp_rcv_space_adjust()
2027 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); in mptcp_rcv_space_adjust()
2029 do_div(grow, msk->rcvq_space.space); in mptcp_rcv_space_adjust()
2046 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2061 msk->rcvq_space.space = msk->rcvq_space.copied; in mptcp_rcv_space_adjust()
2063 msk->rcvq_space.copied = 0; in mptcp_rcv_space_adjust()
2064 msk->rcvq_space.time = mstamp; in mptcp_rcv_space_adjust()
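
Note: the buffer growth in mptcp_rcv_space_adjust() (lines 2025-2029) is TCP's dynamic-right-sizing heuristic applied at the msk level: start from twice the bytes consumed in the last RTT plus slack, then scale by the relative increase over the previous measurement. The arithmetic, assuming the caller's early-return guarantee that copied > space > 0:

    #include <stdint.h>

    static uint64_t drs_rcvwin(uint64_t copied, uint64_t space,
                               unsigned int advmss)
    {
        /* rcvwin = 2 * copied + 16 * advmss */
        uint64_t rcvwin = (copied << 1) + 16ULL * advmss;
        /* grow in proportion to the measured increase */
        uint64_t grow = rcvwin * (copied - space) / space;

        return rcvwin + (grow << 1);
    }
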
2069 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_update_rmem() local
2071 if (!msk->rmem_released) in __mptcp_update_rmem()
2074 atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); in __mptcp_update_rmem()
2075 mptcp_rmem_uncharge(sk, msk->rmem_released); in __mptcp_update_rmem()
2076 WRITE_ONCE(msk->rmem_released, 0); in __mptcp_update_rmem()
2081 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_splice_receive_queue() local
2083 skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue); in __mptcp_splice_receive_queue()
2086 static bool __mptcp_move_skbs(struct mptcp_sock *msk) in __mptcp_move_skbs() argument
2088 struct sock *sk = (struct sock *)msk; in __mptcp_move_skbs()
2093 struct sock *ssk = mptcp_subflow_recv_lookup(msk); in __mptcp_move_skbs()
2106 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in __mptcp_move_skbs()
2116 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) || in __mptcp_move_skbs()
2120 ret |= __mptcp_ofo_queue(msk); in __mptcp_move_skbs()
2125 mptcp_check_data_fin((struct sock *)msk); in __mptcp_move_skbs()
2126 return !skb_queue_empty(&msk->receive_queue); in __mptcp_move_skbs()
2131 const struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_inq_hint() local
2134 skb = skb_peek(&msk->receive_queue); in mptcp_inq_hint()
2136 u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; in mptcp_inq_hint()
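
Note: mptcp_inq_hint() answers TCP_INQ/SIOCINQ from pure sequence-space arithmetic: the distance from the first undelivered mapping in the receive queue to the cumulative ack, clamped to the int it must fit. Essence:

    #include <limits.h>
    #include <stdint.h>

    static int inq_hint(uint64_t head_map_seq, uint64_t ack_seq)
    {
        uint64_t hint_val = ack_seq - head_map_seq;

        return hint_val >= INT_MAX ? INT_MAX : (int)hint_val;
    }
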
2153 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_recvmsg() local
2174 if (unlikely(msk->recvmsg_inq)) in mptcp_recvmsg()
2180 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); in mptcp_recvmsg()
2189 if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk)) in mptcp_recvmsg()
2215 if (__mptcp_move_skbs(msk)) in mptcp_recvmsg()
2237 mptcp_cleanup_rbuf(msk, copied); in mptcp_recvmsg()
2245 mptcp_cleanup_rbuf(msk, copied); in mptcp_recvmsg()
2260 msk, skb_queue_empty_lockless(&sk->sk_receive_queue), in mptcp_recvmsg()
2261 skb_queue_empty(&msk->receive_queue), copied); in mptcp_recvmsg()
2272 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_retransmit_timer() local
2277 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) in mptcp_retransmit_timer()
2281 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); in mptcp_retransmit_timer()
2300 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) in mptcp_subflow_get_retrans() argument
2306 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2314 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2339 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retransmit_pending_data() local
2341 if (__mptcp_check_fallback(msk)) in __mptcp_retransmit_pending_data()
2356 msk->recovery_snd_nxt = msk->snd_nxt; in __mptcp_retransmit_pending_data()
2357 msk->recovery = true; in __mptcp_retransmit_pending_data()
2360 msk->first_pending = rtx_head; in __mptcp_retransmit_pending_data()
2361 msk->snd_burst = 0; in __mptcp_retransmit_pending_data()
2364 list_for_each_entry(cur, &msk->rtx_queue, list) { in __mptcp_retransmit_pending_data()
2409 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_ssk() local
2417 if (msk->in_accept_queue && msk->first == ssk && in __mptcp_close_ssk()
2427 dispose_it = msk->free_first || ssk != msk->first; in __mptcp_close_ssk()
2433 if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) { in __mptcp_close_ssk()
2473 if (ssk == msk->first) in __mptcp_close_ssk()
2474 WRITE_ONCE(msk->first, NULL); in __mptcp_close_ssk()
2486 if (list_is_singular(&msk->conn_list) && msk->first && in __mptcp_close_ssk()
2487 inet_sk_state_load(msk->first) == TCP_CLOSE) { in __mptcp_close_ssk()
2489 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { in __mptcp_close_ssk()
2526 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_subflow() local
2530 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in __mptcp_close_subflow()
2558 static void mptcp_check_fastclose(struct mptcp_sock *msk) in mptcp_check_fastclose() argument
2561 struct sock *sk = (struct sock *)msk; in mptcp_check_fastclose()
2563 if (likely(!READ_ONCE(msk->rcv_fastclose))) in mptcp_check_fastclose()
2566 mptcp_token_destroy(msk); in mptcp_check_fastclose()
2568 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_check_fastclose()
2597 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); in mptcp_check_fastclose()
2609 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retrans() local
2620 err = mptcp_sched_get_retrans(msk); in __mptcp_retrans()
2623 if (mptcp_data_fin_enabled(msk)) { in __mptcp_retrans()
2628 mptcp_send_ack(msk); in __mptcp_retrans()
2642 mptcp_for_each_subflow(msk, subflow) { in __mptcp_retrans()
2654 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : in __mptcp_retrans()
2669 WRITE_ONCE(msk->allow_infinite_fallback, false); in __mptcp_retrans()
2676 msk->bytes_retrans += len; in __mptcp_retrans()
2689 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) in mptcp_reset_tout_timer() argument
2691 struct sock *sk = (struct sock *)msk; in mptcp_reset_tout_timer()
2708 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) in mptcp_mp_fail_no_response() argument
2710 struct sock *ssk = msk->first; in mptcp_mp_fail_no_response()
2727 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_do_fastclose() local
2730 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_do_fastclose()
2737 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); in mptcp_worker() local
2738 struct sock *sk = (struct sock *)msk; in mptcp_worker()
2747 mptcp_check_fastclose(msk); in mptcp_worker()
2749 mptcp_pm_nl_work(msk); in mptcp_worker()
2755 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) in mptcp_worker()
2768 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) in mptcp_worker()
2771 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; in mptcp_worker()
2773 mptcp_mp_fail_no_response(msk); in mptcp_worker()
2782 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_init_sock() local
2784 INIT_LIST_HEAD(&msk->conn_list); in __mptcp_init_sock()
2785 INIT_LIST_HEAD(&msk->join_list); in __mptcp_init_sock()
2786 INIT_LIST_HEAD(&msk->rtx_queue); in __mptcp_init_sock()
2787 INIT_WORK(&msk->work, mptcp_worker); in __mptcp_init_sock()
2788 __skb_queue_head_init(&msk->receive_queue); in __mptcp_init_sock()
2789 msk->out_of_order_queue = RB_ROOT; in __mptcp_init_sock()
2790 msk->first_pending = NULL; in __mptcp_init_sock()
2791 msk->rmem_fwd_alloc = 0; in __mptcp_init_sock()
2792 WRITE_ONCE(msk->rmem_released, 0); in __mptcp_init_sock()
2793 msk->timer_ival = TCP_RTO_MIN; in __mptcp_init_sock()
2794 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; in __mptcp_init_sock()
2796 WRITE_ONCE(msk->first, NULL); in __mptcp_init_sock()
2798 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in __mptcp_init_sock()
2799 WRITE_ONCE(msk->allow_infinite_fallback, true); in __mptcp_init_sock()
2800 msk->recovery = false; in __mptcp_init_sock()
2801 msk->subflow_id = 1; in __mptcp_init_sock()
2803 mptcp_pm_data_init(msk); in __mptcp_init_sock()
2806 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); in __mptcp_init_sock()
2858 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clear_xmit() local
2861 WRITE_ONCE(msk->first_pending, NULL); in __mptcp_clear_xmit()
2862 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) in __mptcp_clear_xmit()
2868 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_cancel_work() local
2870 if (cancel_work_sync(&msk->work)) in mptcp_cancel_work()
2962 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_send_data_fin() local
2965 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), in mptcp_check_send_data_fin()
2966 msk->snd_nxt, msk->write_seq); in mptcp_check_send_data_fin()
2971 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || in mptcp_check_send_data_fin()
2975 WRITE_ONCE(msk->snd_nxt, msk->write_seq); in mptcp_check_send_data_fin()
2977 mptcp_for_each_subflow(msk, subflow) { in mptcp_check_send_data_fin()
2986 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_wr_shutdown() local
2989 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, in __mptcp_wr_shutdown()
2993 WRITE_ONCE(msk->write_seq, msk->write_seq + 1); in __mptcp_wr_shutdown()
2994 WRITE_ONCE(msk->snd_data_fin_enable, 1); in __mptcp_wr_shutdown()
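
Note: the DATA_FIN send path keeps one invariant: __mptcp_wr_shutdown() reserves a single unit of data-sequence space for the FIN (write_seq + 1, line 2993), so "snd_nxt + 1 == write_seq" (lines 1708, 2971) means everything but the DATA_FIN is already out. As a predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool data_fin_ready(bool snd_data_fin_enable,
                               uint64_t snd_nxt, uint64_t write_seq)
    {
        return snd_data_fin_enable && snd_nxt + 1 == write_seq;
    }
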
3001 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_destroy_sock() local
3003 pr_debug("msk=%p\n", msk); in __mptcp_destroy_sock()
3009 msk->pm.status = 0; in __mptcp_destroy_sock()
3010 mptcp_release_sched(msk); in __mptcp_destroy_sock()
3014 WARN_ON_ONCE(msk->rmem_fwd_alloc); in __mptcp_destroy_sock()
3015 WARN_ON_ONCE(msk->rmem_released); in __mptcp_destroy_sock()
3057 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close() local
3069 if (mptcp_data_avail(msk) || timeout < 0) { in __mptcp_close()
3083 mptcp_for_each_subflow(msk, subflow) { in __mptcp_close()
3092 if (ssk == msk->first) in __mptcp_close()
3112 if (msk->token) in __mptcp_close()
3113 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL); in __mptcp_close()
3139 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
3143 struct ipv6_pinfo *msk6 = inet6_sk(msk); in mptcp_copy_inaddrs()
3145 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
3146 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
3154 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
3155 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
3156 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
3157 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
3158 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
3159 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
3164 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_disconnect() local
3170 if (msk->fastopening) in mptcp_disconnect()
3179 if (msk->token) in mptcp_disconnect()
3180 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL); in mptcp_disconnect()
3185 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); in mptcp_disconnect()
3186 WRITE_ONCE(msk->flags, 0); in mptcp_disconnect()
3187 msk->cb_flags = 0; in mptcp_disconnect()
3188 msk->recovery = false; in mptcp_disconnect()
3189 msk->can_ack = false; in mptcp_disconnect()
3190 msk->fully_established = false; in mptcp_disconnect()
3191 msk->rcv_data_fin = false; in mptcp_disconnect()
3192 msk->snd_data_fin_enable = false; in mptcp_disconnect()
3193 msk->rcv_fastclose = false; in mptcp_disconnect()
3194 msk->use_64bit_ack = false; in mptcp_disconnect()
3195 msk->bytes_consumed = 0; in mptcp_disconnect()
3196 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in mptcp_disconnect()
3197 mptcp_pm_data_reset(msk); in mptcp_disconnect()
3199 msk->bytes_acked = 0; in mptcp_disconnect()
3200 msk->bytes_received = 0; in mptcp_disconnect()
3201 msk->bytes_sent = 0; in mptcp_disconnect()
3202 msk->bytes_retrans = 0; in mptcp_disconnect()
3203 msk->rcvspace_init = 0; in mptcp_disconnect()
3269 struct mptcp_sock *msk; in mptcp_sk_clone_init() local
3288 msk = mptcp_sk(nsk); in mptcp_sk_clone_init()
3289 msk->local_key = subflow_req->local_key; in mptcp_sk_clone_init()
3290 msk->token = subflow_req->token; in mptcp_sk_clone_init()
3291 msk->in_accept_queue = 1; in mptcp_sk_clone_init()
3292 WRITE_ONCE(msk->fully_established, false); in mptcp_sk_clone_init()
3294 WRITE_ONCE(msk->csum_enabled, true); in mptcp_sk_clone_init()
3296 msk->write_seq = subflow_req->idsn + 1; in mptcp_sk_clone_init()
3297 msk->snd_nxt = msk->write_seq; in mptcp_sk_clone_init()
3298 msk->snd_una = msk->write_seq; in mptcp_sk_clone_init()
3299 msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd; in mptcp_sk_clone_init()
3300 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; in mptcp_sk_clone_init()
3301 mptcp_init_sched(msk, mptcp_sk(sk)->sched); in mptcp_sk_clone_init()
3304 msk->subflow_id = 2; in mptcp_sk_clone_init()
3315 WRITE_ONCE(msk->first, ssk); in mptcp_sk_clone_init()
3317 list_add(&subflow->node, &msk->conn_list); in mptcp_sk_clone_init()
3323 mptcp_token_accept(subflow_req, msk); in mptcp_sk_clone_init()
3331 mptcp_rcv_space_init(msk, ssk); in mptcp_sk_clone_init()
3334 __mptcp_subflow_fully_established(msk, subflow, mp_opt); in mptcp_sk_clone_init()
3341 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
3345 msk->rcvspace_init = 1; in mptcp_rcv_space_init()
3346 msk->rcvq_space.copied = 0; in mptcp_rcv_space_init()
3347 msk->rcvq_space.rtt_us = 0; in mptcp_rcv_space_init()
3349 msk->rcvq_space.time = tp->tcp_mstamp; in mptcp_rcv_space_init()
3352 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, in mptcp_rcv_space_init()
3354 if (msk->rcvq_space.space == 0) in mptcp_rcv_space_init()
3355 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; in mptcp_rcv_space_init()
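
Note: the fallback on line 3355 sizes the initial receive space as ten default-MSS segments, matching TCP's initial-window rule of thumb:

    #include <stdio.h>

    #define TCP_INIT_CWND   10   /* kernel's initial congestion window */
    #define TCP_MSS_DEFAULT 536  /* default MSS when none is learned yet */

    int main(void)
    {
        /* used when tp->rcv_wnd is still zero at init time */
        printf("%d bytes\n", TCP_INIT_CWND * TCP_MSS_DEFAULT); /* 5360 */
        return 0;
    }
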
3358 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) in mptcp_destroy_common() argument
3361 struct sock *sk = (struct sock *)msk; in mptcp_destroy_common()
3366 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_destroy_common()
3371 skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); in mptcp_destroy_common()
3373 skb_rbtree_purge(&msk->out_of_order_queue); in mptcp_destroy_common()
3379 sk_forward_alloc_add(sk, msk->rmem_fwd_alloc); in mptcp_destroy_common()
3380 WRITE_ONCE(msk->rmem_fwd_alloc, 0); in mptcp_destroy_common()
3381 mptcp_token_destroy(msk); in mptcp_destroy_common()
3382 mptcp_pm_free_anno_list(msk); in mptcp_destroy_common()
3383 mptcp_free_local_addr_list(msk); in mptcp_destroy_common()
3388 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_destroy() local
3391 msk->free_first = 1; in mptcp_destroy()
3392 mptcp_destroy_common(msk, 0); in mptcp_destroy()
3426 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_release_cb() local
3429 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); in mptcp_release_cb()
3436 list_splice_init(&msk->join_list, &join_list); in mptcp_release_cb()
3445 msk->cb_flags &= ~flags; in mptcp_release_cb()
3459 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) in mptcp_release_cb()
3461 if (unlikely(msk->cb_flags)) { in mptcp_release_cb()
3466 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) in mptcp_release_cb()
3467 __mptcp_sync_state(sk, msk->pending_state); in mptcp_release_cb()
3468 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) in mptcp_release_cb()
3470 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) in mptcp_release_cb()
3544 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_get_port() local
3546 pr_debug("msk=%p, ssk=%p\n", msk, msk->first); in mptcp_get_port()
3547 if (WARN_ON_ONCE(!msk->first)) in mptcp_get_port()
3550 return inet_csk_get_port(msk->first, snum); in mptcp_get_port()
3556 struct mptcp_sock *msk; in mptcp_finish_connect() local
3561 msk = mptcp_sk(sk); in mptcp_finish_connect()
3571 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3573 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3588 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join() local
3589 struct sock *parent = (void *)msk; in mptcp_finish_join()
3592 pr_debug("msk=%p, subflow=%p\n", msk, subflow); in mptcp_finish_join()
3602 mptcp_subflow_joined(msk, ssk); in mptcp_finish_join()
3607 if (!mptcp_pm_allow_new_subflow(msk)) in mptcp_finish_join()
3615 ret = __mptcp_finish_join(msk, ssk); in mptcp_finish_join()
3618 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3622 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3623 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); in mptcp_finish_join()
3650 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) in mptcp_ioctl_outq() argument
3652 const struct sock *sk = (void *)msk; in mptcp_ioctl_outq()
3661 delta = msk->write_seq - v; in mptcp_ioctl_outq()
3662 if (__mptcp_check_fallback(msk) && msk->first) { in mptcp_ioctl_outq()
3663 struct tcp_sock *tp = tcp_sk(msk->first); in mptcp_ioctl_outq()
3669 if (!((1 << msk->first->sk_state) & in mptcp_ioctl_outq()
3681 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_ioctl() local
3690 __mptcp_move_skbs(msk); in mptcp_ioctl()
3696 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); in mptcp_ioctl()
3701 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); in mptcp_ioctl()
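
Note: mptcp_ioctl_outq() serves both SIOCOUTQ and SIOCOUTQNSD from one delta; only the base differs — snd_una for "sent or queued but unacked" (line 3696) versus snd_nxt for "queued but not yet sent" (line 3701). Condensed, with the kernel's INT_MAX clamp:

    #include <limits.h>
    #include <stdint.h>

    static int mptcp_outq(uint64_t write_seq, uint64_t base)
    {
        uint64_t delta = write_seq - base;

        return delta > INT_MAX ? INT_MAX : (int)delta;
    }
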
3711 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, in mptcp_subflow_early_fallback() argument
3715 __mptcp_do_fallback(msk); in mptcp_subflow_early_fallback()
3721 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_connect() local
3725 ssk = __mptcp_nmpc_sk(msk); in mptcp_connect()
3736 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
3740 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
3743 WRITE_ONCE(msk->write_seq, subflow->idsn); in mptcp_connect()
3744 WRITE_ONCE(msk->snd_nxt, subflow->idsn); in mptcp_connect()
3745 WRITE_ONCE(msk->snd_una, subflow->idsn); in mptcp_connect()
3746 if (likely(!__mptcp_check_fallback(msk))) in mptcp_connect()
3752 if (!msk->fastopening) in mptcp_connect()
3774 if (!msk->fastopening) in mptcp_connect()
3782 mptcp_token_destroy(msk); in mptcp_connect()
3826 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_bind() local
3831 ssk = __mptcp_nmpc_sk(msk); in mptcp_bind()
3853 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_listen() local
3858 pr_debug("msk=%p\n", msk); in mptcp_listen()
3866 ssk = __mptcp_nmpc_sk(msk); in mptcp_listen()
3894 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_stream_accept() local
3898 pr_debug("msk=%p\n", msk); in mptcp_stream_accept()
3903 ssk = READ_ONCE(msk->first); in mptcp_stream_accept()
3936 msk = mptcp_sk(newsk); in mptcp_stream_accept()
3937 msk->in_accept_queue = 0; in mptcp_stream_accept()
3942 mptcp_for_each_subflow(msk, subflow) { in mptcp_stream_accept()
3952 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { in mptcp_stream_accept()
3953 __mptcp_close_ssk(newsk, msk->first, in mptcp_stream_accept()
3954 mptcp_subflow_ctx(msk->first), 0); in mptcp_stream_accept()
3955 if (unlikely(list_is_singular(&msk->conn_list))) in mptcp_stream_accept()
3977 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) in mptcp_check_writeable() argument
3979 struct sock *sk = (struct sock *)msk; in mptcp_check_writeable()
3996 struct mptcp_sock *msk; in mptcp_poll() local
4001 msk = mptcp_sk(sk); in mptcp_poll()
4005 pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); in mptcp_poll()
4007 struct sock *ssk = READ_ONCE(msk->first); in mptcp_poll()
4026 mask |= mptcp_check_writeable(msk); in mptcp_poll()