--- protocol.c (1e1d9d6f119c55c05e8ea78ed3e49046690abffd)
+++ protocol.c (ff5a0b421cb23bf6b2898939ffef5b683045d9d3)
 // SPDX-License-Identifier: GPL-2.0
 /* Multipath TCP
  *
  * Copyright (c) 2017 - 2019, Intel Corporation.
  */

 #define pr_fmt(fmt) "MPTCP: " fmt

--- 1377 unchanged lines hidden ---

                                         sizeof(struct ipv6hdr) - \
                                         sizeof(struct frag_hdr))

 struct subflow_send_info {
        struct sock *ssk;
        u64 ratio;
 };

+void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
+{
+       if (!subflow->stale)
+               return;
+
+       subflow->stale = 0;
+}
+
+bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+{
+       if (unlikely(subflow->stale)) {
+               u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
+
+               if (subflow->stale_rcv_tstamp == rcv_tstamp)
+                       return false;
+
+               mptcp_subflow_set_active(subflow);
+       }
+       return __mptcp_subflow_active(subflow);
+}
+
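The two helpers added above implement stale-subflow recovery: once a subflow has been marked stale, `mptcp_subflow_active()` keeps reporting it unusable until the underlying TCP socket's `rcv_tstamp` differs from the value presumably recorded when the subflow was marked, at which point the stale flag is cleared and the usual `__mptcp_subflow_active()` check applies again. The sketch below is a minimal user-space model of that decision, not kernel code: the `model_*` names are invented, and the trailing `__mptcp_subflow_active()` check is reduced to `true`.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_subflow {
	bool stale;                /* set when the path manager marks the subflow stale */
	uint32_t stale_rcv_tstamp; /* rcv_tstamp sampled when it was marked stale       */
	uint32_t rcv_tstamp;       /* stands in for tcp_sk(ssk)->rcv_tstamp              */
};

static bool model_subflow_active(struct model_subflow *sf)
{
	if (sf->stale) {
		/* no TCP-level progress since it was marked: still unusable */
		if (sf->stale_rcv_tstamp == sf->rcv_tstamp)
			return false;

		/* progress seen: clear the mark, as mptcp_subflow_set_active() does */
		sf->stale = false;
	}

	/* the real helper falls through to __mptcp_subflow_active() here */
	return true;
}

int main(void)
{
	struct model_subflow sf = {
		.stale = true, .stale_rcv_tstamp = 100, .rcv_tstamp = 100,
	};

	printf("no progress yet -> usable=%d\n", model_subflow_active(&sf));
	sf.rcv_tstamp = 150;	/* new data/ACKs arrived on the subflow */
	printf("after progress  -> usable=%d\n", model_subflow_active(&sf));
	return 0;
}
```

The `unlikely(subflow->stale)` annotation in the real helper reflects that the common case is a single flag test, so the check stays cheap on every send/selection attempt.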
 /* implement the mptcp packet scheduler;
  * returns the subflow that will transmit the next DSS
  * additionally updates the rtx timeout
  */
 static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 {
        struct subflow_send_info send_info[2];
        struct mptcp_subflow_context *subflow;

--- 65 unchanged lines hidden ---

 static void mptcp_push_release(struct sock *sk, struct sock *ssk,
                                struct mptcp_sendmsg_info *info)
 {
        tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
        release_sock(ssk);
 }

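The scheduler comment and the `send_info[2]` / `struct subflow_send_info { ssk; ratio; }` pair above suggest a two-slot, lowest-ratio-wins selection (one slot for regular subflows, one for backups), but the body of `mptcp_subflow_get_send()` sits in the hidden region. The sketch below only illustrates that general pattern under those assumptions; the `model_*` types and the ratio formula are invented and are not the kernel's computation.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct model_subflow {
	const char *name;
	int backup;             /* 1 if this is a backup subflow            */
	uint64_t queued;        /* bytes already queued on the subflow      */
	uint64_t pacing_rate;   /* bytes/sec the subflow can currently send */
};

struct model_send_info {
	struct model_subflow *sf;
	uint64_t ratio;
};

static struct model_subflow *model_get_send(struct model_subflow *sfs, size_t n)
{
	/* slot 0: best regular subflow, slot 1: best backup subflow */
	struct model_send_info send_info[2] = {
		{ NULL, UINT64_MAX }, { NULL, UINT64_MAX },
	};

	for (size_t i = 0; i < n; i++) {
		struct model_subflow *sf = &sfs[i];
		uint64_t rate = sf->pacing_rate ? sf->pacing_rate : 1;
		/* invented ratio: queued data scaled against available rate */
		uint64_t ratio = (sf->queued << 16) / rate;
		int slot = sf->backup ? 1 : 0;

		if (ratio < send_info[slot].ratio) {
			send_info[slot].sf = sf;
			send_info[slot].ratio = ratio;
		}
	}

	/* use a backup subflow only when no regular one qualified */
	return send_info[0].sf ? send_info[0].sf : send_info[1].sf;
}

int main(void)
{
	struct model_subflow sfs[] = {
		{ "wifi",     0, 64 * 1024, 10 * 1000 * 1000 },
		{ "ethernet", 0, 16 * 1024, 100 * 1000 * 1000 },
		{ "lte",      1,  8 * 1024, 20 * 1000 * 1000 },
	};

	struct model_subflow *pick = model_get_send(sfs, sizeof(sfs) / sizeof(sfs[0]));

	printf("next DSS goes on: %s\n", pick ? pick->name : "(none)");
	return 0;
}
```

Keeping a separate best candidate per class makes the "use a backup subflow only when nothing else is available" policy a one-line fallback at the end of the selection.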
-static void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 {
        struct sock *prev_ssk = NULL, *ssk = NULL;
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_sendmsg_info info = {
                .flags = flags,
        };
        struct mptcp_data_frag *dfrag;
        int len, copied = 0;

--- 626 unchanged lines hidden ---

        sock_put(sk);
 }

 /* Find an idle subflow. Return NULL if there is unacked data at tcp
  * level.
  *
  * A backup subflow is returned only if that is the only kind available.
  */
-static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
 {
        struct sock *backup = NULL, *pick = NULL;
        struct mptcp_subflow_context *subflow;
        int min_stale_count = INT_MAX;

        sock_owned_by_me((const struct sock *)msk);

        if (__mptcp_check_fallback(msk))
                return NULL;

        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

-               if (!mptcp_subflow_active(subflow))
+               if (!__mptcp_subflow_active(subflow))
                        continue;

                /* still data outstanding at TCP level? skip this */
                if (!tcp_rtx_and_write_queues_empty(ssk)) {
                        mptcp_pm_subflow_chk_stale(msk, ssk);
                        min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
                        continue;
                }

--- 1436 unchanged lines hidden ---
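In the retransmission path above, the visible part of `mptcp_subflow_get_retrans()` skips subflows that fail `__mptcp_subflow_active()`, and treats subflows that still have data in their TCP rtx/write queues as non-candidates while reporting them to the path manager (`mptcp_pm_subflow_chk_stale()`) and tracking the smallest `stale_count` seen. The sketch below models only what is shown plus the contract documented in the comment before the function (prefer an idle non-backup subflow, fall back to an idle backup, otherwise NULL); everything past the visible loop body is hidden, and the `model_*` types and flags are invented stand-ins.

```c
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

struct model_subflow {
	const char *name;
	int active;        /* stands in for __mptcp_subflow_active()         */
	int idle;          /* stands in for tcp_rtx_and_write_queues_empty() */
	int backup;        /* backup-priority subflow                        */
	int stale_count;   /* consecutive "looks stale" observations         */
};

static struct model_subflow *model_get_retrans(struct model_subflow *sfs, size_t n,
					       int *min_stale_count)
{
	struct model_subflow *backup = NULL, *pick = NULL;

	*min_stale_count = INT_MAX;

	for (size_t i = 0; i < n; i++) {
		struct model_subflow *sf = &sfs[i];

		if (!sf->active)
			continue;

		/* still data outstanding at TCP level? not a candidate, but
		 * remember how stale the busy subflows look
		 */
		if (!sf->idle) {
			if (sf->stale_count < *min_stale_count)
				*min_stale_count = sf->stale_count;
			continue;
		}

		if (sf->backup) {
			if (!backup)
				backup = sf;
		} else if (!pick) {
			pick = sf;
		}
	}

	/* a backup subflow is returned only if it is the only kind available */
	return pick ? pick : backup;
}

int main(void)
{
	struct model_subflow sfs[] = {
		{ "wifi", 1, 0, 0, 3 },   /* active, but data still queued */
		{ "lte",  1, 1, 1, 0 },   /* idle backup                   */
	};
	int min_stale;
	struct model_subflow *sf = model_get_retrans(sfs, 2, &min_stale);

	printf("retransmit on: %s (min_stale_count=%d)\n",
	       sf ? sf->name : "(none)", min_stale);
	return 0;
}
```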