--- tcp_ipv4.c (1fba70e5b6bed53496ba1f1f16127f5be01b5fb6)
+++ tcp_ipv4.c (c92e8c02fe664155ac4234516e32544bec0f113d)
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system.  INET is implemented using the  BSD Socket
  *		interface as the means of communication with the user level.
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
  *		IPv4 specific functions
--- 466 unchanged lines hidden ---
 		if (sock_owned_by_user(sk))
 			break;
 
 		icsk->icsk_backoff--;
 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 					       TCP_TIMEOUT_INIT;
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-		skb = tcp_rtx_queue_head(sk);
+		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
 
 		tcp_mstamp_refresh(tp);
 		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
 		remaining = icsk->icsk_rto -
 			    usecs_to_jiffies(delta_us);
 
 		if (remaining > 0) {
--- 380 unchanged lines hidden ---
 
 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq->opt);
+					    rcu_dereference(ireq->ireq_opt));
 		err = net_xmit_eval(err);
 	}
 
 	return err;
 }
 
 /*
  * IPv4 request_sock destructor.
  */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-	kfree(inet_rsk(req)->opt);
+	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 #ifdef CONFIG_TCP_MD5SIG
 /*
  * RFC2385 MD5 checksumming requires a mapping of
  * IP address->MD5 Key.
  * We need to maintain these in the sk structure.
  */
--- 359 unchanged lines hidden ---
 	return false;
 }
 
 static void tcp_v4_init_req(struct request_sock *req,
 			    const struct sock *sk_listener,
 			    struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
+	struct net *net = sock_net(sk_listener);
 
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-	ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
+	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
 					  struct flowi *fl,
 					  const struct request_sock *req)
 {
 	return inet_csk_route_req(sk, &fl->u.ip4, req);
 }
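
The c92e8c02 column stops keeping the request socket's saved IP options as a plain ireq->opt pointer and switches to the RCU-annotated ireq->ireq_opt: tcp_v4_init_req() publishes it with RCU_INIT_POINTER(), tcp_v4_send_synack() reads it through rcu_dereference(), and tcp_v4_reqsk_destructor() frees it via rcu_dereference_protected(..., 1), asserting that nothing else can still reach the pointer. The fragment below is only a schematic, self-contained sketch of that accessor pattern; struct saved_opts, struct opt_owner and the helper names are hypothetical stand-ins, not types from the diff.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-ins for a request sock and its saved options. */
struct saved_opts {
        int optlen;
};

struct opt_owner {
        struct saved_opts __rcu *opts;
};

/* Publish: the owner is not yet visible to readers, so the barrier-free
 * RCU_INIT_POINTER() is enough (as in tcp_v4_init_req()). */
static void opt_owner_init(struct opt_owner *o, struct saved_opts *s)
{
        RCU_INIT_POINTER(o->opts, s);
}

/* Read: callers are expected to be inside an RCU read-side section,
 * as the SYN-ACK send path is when it calls rcu_dereference(). */
static int opt_owner_optlen(struct opt_owner *o)
{
        struct saved_opts *s = rcu_dereference(o->opts);

        return s ? s->optlen : 0;
}

/* Destroy: the "1" condition asserts the caller holds the last reference,
 * so the pointer may be fetched without a read-side lock (as in the
 * request-sock destructor). */
static void opt_owner_destroy(struct opt_owner *o)
{
        kfree(rcu_dereference_protected(o->opts, 1));
}
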
--- 70 unchanged lines hidden ---
 	inet_sk_rx_dst_set(newsk, skb);
 
 	newtp		      = tcp_sk(newsk);
 	newinet		      = inet_sk(newsk);
 	ireq		      = inet_rsk(req);
 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 	newinet->inet_saddr   = ireq->ir_loc_addr;
-	inet_opt	      = ireq->opt;
-	rcu_assign_pointer(newinet->inet_opt, inet_opt);
-	ireq->opt	      = NULL;
+	inet_opt	      = rcu_dereference(ireq->ireq_opt);
+	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
 	newinet->mc_index     = inet_iif(skb);
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 	newinet->inet_id = newtp->write_seq ^ jiffies;
 
--- 28 unchanged lines hidden ---
 			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
 	}
 #endif
 
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-	if (*own_req)
+	if (likely(*own_req)) {
 		tcp_move_syn(newtp, req);
-
+		ireq->ireq_opt = NULL;
+	} else {
+		newinet->inet_opt = NULL;
+	}
 	return newsk;
 
 exit_overflow:
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
 	tcp_listendrop(sk);
 	return NULL;
 put_and_exit:
+	newinet->inet_opt = NULL;
 	inet_csk_prepare_forced_close(newsk);
 	tcp_done(newsk);
 	goto exit;
 }
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
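
In tcp_v4_syn_recv_sock() above, the two columns also differ in which socket ends up owning the saved options. The c92e8c02 column copies the pointer into newinet->inet_opt and clears ireq->ireq_opt only once inet_ehash_nolisten() confirms the child took ownership (*own_req); otherwise newinet->inet_opt is cleared, and the put_and_exit error path clears it as well, so that exactly one of the two destructors frees the options. The 1fba70e5 column instead clears ireq->opt unconditionally as soon as the pointer is copied. Below is a condensed sketch of the c92e8c02 hand-off rule with hypothetical owner types; the diff itself assigns NULL directly rather than through RCU_INIT_POINTER().

#include <linux/rcupdate.h>
#include <linux/types.h>

struct opts;				/* stand-in for the saved-options object */

struct owner {
        struct opts __rcu *opt;		/* stand-in for ireq_opt / inet_opt */
};

/* Leave the options reachable from exactly one owner, so only one
 * destructor will end up freeing them. */
static void handoff(struct owner *child, struct owner *req, bool own_req)
{
        if (likely(own_req))
                RCU_INIT_POINTER(req->opt, NULL);	/* child keeps the options */
        else
                RCU_INIT_POINTER(child->opt, NULL);	/* request sock keeps them */
}
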
 
 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
--- 347 unchanged lines hidden ---
 						inet_iif(skb),
 						sdif);
 		if (sk2) {
 			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 			refcounted = false;
 			goto process;
 		}
+		/* Fall through to ACK */
 	}
-		/* to ACK */
-		/* fall through */
 	case TCP_TW_ACK:
 		tcp_v4_timewait_ack(sk, skb);
 		break;
 	case TCP_TW_RST:
 		tcp_v4_send_reset(sk, skb);
 		inet_twsk_deschedule_put(inet_twsk(sk));
 		goto discard_it;
 	case TCP_TW_SUCCESS:;
--- 95 unchanged lines hidden ---
 	/* Clean up a referenced TCP bind bucket. */
 	if (inet_csk(sk)->icsk_bind_hash)
 		inet_put_port(sk);
 
 	BUG_ON(tp->fastopen_rsk);
 
 	/* If socket is aborted during connect operation */
 	tcp_free_fastopen_req(tp);
-	tcp_fastopen_destroy_cipher(sk);
 	tcp_saved_syn_free(tp);
 
 	sk_sockets_allocated_dec(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
--- 565 unchanged lines hidden ---
 	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
 	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
 	net->ipv4.sysctl_tcp_sack = 1;
 	net->ipv4.sysctl_tcp_window_scaling = 1;
 	net->ipv4.sysctl_tcp_timestamps = 1;
 
-	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
-	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
-	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
-	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
-
 	return 0;
 fail:
 	tcp_sk_exit(net);
 
 	return res;
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
 {
-	struct net *net;
-
 	inet_twsk_purge(&tcp_hashinfo, AF_INET);
-
-	list_for_each_entry(net, net_exit_list, exit_list)
-		tcp_fastopen_ctx_destroy(net);
 }
 
 static struct pernet_operations __net_initdata tcp_sk_ops = {
        .init	    = tcp_sk_init,
        .exit	    = tcp_sk_exit,
        .exit_batch = tcp_sk_exit_batch,
 };
 
 void __init tcp_v4_init(void)
 {
 	if (register_pernet_subsys(&tcp_sk_ops))
 		panic("Failed to create the TCP control socket.\n");
 }