tcp_output.c: diff between 4343f61103cdb8ccd6f3d5dd7168f1176a1cee37 (old) and a842fe1425cb20f457abd3f8ef98b468f83ca98b (new)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  *	INET		An implementation of the TCP/IP protocol suite for the LINUX
  *			operating system.  INET is implemented using the  BSD Socket
  *			interface as the means of communication with the user level.
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
--- 1139 unchanged lines hidden ---
 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);

 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */

 	/* Cleanup our debris for IP stacks */
 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
 			       sizeof(struct inet6_skb_parm)));

+	tcp_add_tx_delay(skb, tp);
+
 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);

 	if (unlikely(err > 0)) {
 		tcp_enter_cwr(sk);
 		err = net_xmit_eval(err);
 	}
 	if (!err && oskb) {
 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
--- 1065 unchanged lines hidden ---
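The tcp_add_tx_delay() helper called above is introduced by this same patch, outside the hunks shown here. A minimal sketch of its likely shape, assuming tp->tcp_tx_delay holds the configured delay in microseconds and that the static key gates the fast path (not copied from the visible diff):

/* Sketch only: the real helper is defined elsewhere in this patch.
 * It pushes the skb's earliest departure time (skb->skb_mstamp_ns)
 * forward by the per-socket delay, and the static key keeps the
 * cost to a single patched branch when no socket uses the feature.
 */
static void tcp_add_tx_delay(struct sk_buff *skb,
			     const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

Because the delay lands in skb->skb_mstamp_ns, it is enforced by the existing earliest-departure-time machinery (fq pacing) rather than by any new queue.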
 	limit = max_t(unsigned long,
 		      2 * skb->truesize,
 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
 	if (sk->sk_pacing_status == SK_PACING_NONE)
 		limit = min_t(unsigned long, limit,
 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
 	limit <<= factor;

+	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
+	    tcp_sk(sk)->tcp_tx_delay) {
+		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
+
+		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
+		 * approximate our needs assuming an ~100% skb->truesize overhead.
+		 * USEC_PER_SEC is approximated by 2^20.
+		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
+		 */
+		extra_bytes >>= (20 - 1);
+		limit += extra_bytes;
+	}
 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
 		/* Always send skb if rtx queue is empty.
 		 * No need to wait for TX completion to call us back,
 		 * after softirq/tasklet schedule.
 		 * This helps when TX completions are delayed too much.
 		 */
 		if (tcp_rtx_queue_empty(sk))
 			return false;
--- 962 unchanged lines hidden ---
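The shift in the added TSQ hunk is a cheap stand-in for dividing by USEC_PER_SEC/2: it doubles the rate*delay product (to cover the assumed ~100% skb->truesize overhead) while dividing by 2^20 instead of 10^6, so it slightly underestimates (by about 4.6%, since 2^20 > 10^6). A standalone user-space illustration of the arithmetic, using hypothetical example values:

/* Standalone illustration (not kernel code) of the extra_bytes
 * arithmetic in the TSQ hunk above; the pacing rate and delay
 * below are hypothetical examples.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pacing_rate = 125000000;	/* bytes/sec, ~1 Gbit/s */
	uint64_t tx_delay = 10000;		/* usec, i.e. 10 ms */

	/* Exact form: bytes paced out during the delay, doubled for
	 * the assumed ~100% skb->truesize overhead.
	 */
	uint64_t exact = pacing_rate * tx_delay / (1000000 / 2);

	/* Kernel form: USEC_PER_SEC approximated by 2^20, so the
	 * division by USEC_PER_SEC/2 becomes a right shift by 19.
	 */
	uint64_t approx = (pacing_rate * tx_delay) >> (20 - 1);

	/* Prints exact=2500000 approx=2384185: roughly 2.4 MB of
	 * extra TSQ budget so the delayed skbs do not stall the
	 * socket on the sk_wmem_alloc check that follows.
	 */
	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)exact, (unsigned long long)approx);
	return 0;
}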
 	struct inet_request_sock *ireq = inet_rsk(req);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *md5 = NULL;
 	struct tcp_out_options opts;
 	struct sk_buff *skb;
 	int tcp_header_size;
 	struct tcphdr *th;
 	int mss;
+	u64 now;

 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
 	}
 	/* Reserve space for headers. */
 	skb_reserve(skb, MAX_TCP_HEADER);
--- 15 unchanged lines hidden ---
 		skb_set_owner_w(skb, (struct sock *)sk);
 		break;
 	}
 	skb_dst_set(skb, dst);

 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));

 	memset(&opts, 0, sizeof(opts));
+	now = tcp_clock_ns();
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
 	else
 #endif
 	{
-		skb->skb_mstamp_ns = tcp_clock_ns();
+		skb->skb_mstamp_ns = now;
 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
 	}

 #ifdef CONFIG_TCP_MD5SIG
 	rcu_read_lock();
 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
--- 26 unchanged lines hidden ---
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
 	if (md5)
 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
 					       md5, req_to_sk(req), skb);
 	rcu_read_unlock();
 #endif

-	/* Do not fool tcpdump (if any), clean our debris */
-	skb->tstamp = 0;
+	skb->skb_mstamp_ns = now;
+	tcp_add_tx_delay(skb, tp);
+
 	return skb;
 }
 EXPORT_SYMBOL(tcp_make_synack);

 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_congestion_ops *ca;
--- 493 unchanged lines hidden ---
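Taken together, the hunks make both data packets (tcp_transmit_skb) and SYNACKs (tcp_make_synack) carry an earliest departure time pushed into the future by the per-socket delay; the old "clean our debris" zeroing of skb->tstamp is dropped because the timestamp is now meaningful. A hypothetical user-space sketch of enabling the feature, assuming the companion TCP_TX_DELAY socket option (value in microseconds) that the same change is understood to add; the option number below is an assumption, not taken from the visible diff:

/* Hypothetical user-space sketch: enable the per-socket transmit
 * delay that the kernel hunks above honor. TCP_TX_DELAY and its
 * microsecond unit are assumptions based on the surrounding patch;
 * the fallback define is a guess for older headers.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_TX_DELAY
#define TCP_TX_DELAY 37		/* assumed value; check linux/tcp.h */
#endif

/* Ask the kernel to delay this socket's packets by 'usec' microseconds. */
static int set_tx_delay(int fd, int usec)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_TX_DELAY,
			  &usec, sizeof(usec));
}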