--- tcp_ipv6.c (a108772d03d8bdb43258218b00bfe43bbe1e8800)
+++ tcp_ipv6.c (08eaef90403110e51861d93e8008a355af467bbe)
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	TCP over IPv6
  *	Linux INET6 implementation
  *
  *	Authors:
  *	Pedro Roque		<roque@di.fc.ul.pt>
  *
--- 132 unchanged lines hidden ---
 
 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			  int addr_len)
 {
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
-	struct inet_sock *inet = inet_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct in6_addr *saddr = NULL, *final_p, final;
 	struct inet_timewait_death_row *tcp_death_row;
 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
+	struct inet_sock *inet = inet_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct in6_addr *saddr = NULL, *final_p, final;
+	struct net *net = sock_net(sk);
 	struct ipv6_txoptions *opt;
-	struct flowi6 fl6;
 	struct dst_entry *dst;
+	struct flowi6 fl6;
 	int addr_type;
 	int err;
 
 	if (addr_len < SIN6_LEN_RFC2133)
 		return -EINVAL;
 
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
--- 109 unchanged lines hidden ---
 	fl6.fl6_sport = inet->inet_sport;
 	fl6.flowi6_uid = sk->sk_uid;
 
 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
-	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto failure;
 	}
 
 	if (!saddr) {
+		struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
+		struct in6_addr prev_v6_rcv_saddr;
+
+		if (icsk->icsk_bind2_hash) {
+			prev_addr_hashbucket = inet_bhashfn_portaddr(&tcp_hashinfo,
+								     sk, net, inet->inet_num);
+			prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+		}
 		saddr = &fl6.saddr;
 		sk->sk_v6_rcv_saddr = *saddr;
+
+		if (prev_addr_hashbucket) {
+			err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+			if (err) {
+				sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
+				goto failure;
+			}
+		}
 	}
 
 	/* set the source address */
 	np->saddr = *saddr;
 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 	sk->sk_gso_type = SKB_GSO_TCPV6;
 	ip6_dst_store(sk, dst, NULL, NULL);
 
 	icsk->icsk_ext_hdr_len = 0;
 	if (opt)
 		icsk->icsk_ext_hdr_len = opt->opt_flen +
 					 opt->opt_nflen;
 
 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 
 	inet->inet_dport = usin->sin6_port;
 
 	tcp_set_state(sk, TCP_SYN_SENT);
-	tcp_death_row = sock_net(sk)->ipv4.tcp_death_row;
+	tcp_death_row = net->ipv4.tcp_death_row;
 	err = inet6_hash_connect(tcp_death_row, sk);
 	if (err)
 		goto late_failure;
 
 	sk_set_txhash(sk);
 
 	if (likely(!tp->repair)) {
 		if (!tp->write_seq)
 			WRITE_ONCE(tp->write_seq,
 				   secure_tcpv6_seq(np->saddr.s6_addr32,
 						    sk->sk_v6_daddr.s6_addr32,
 						    inet->inet_sport,
 						    inet->inet_dport));
-		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
-						   np->saddr.s6_addr32,
+		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
 						   sk->sk_v6_daddr.s6_addr32);
 	}
 
 	if (tcp_fastopen_defer_connect(sk, &err))
 		return err;
 	if (err)
 		goto late_failure;
 
--- 501 unchanged lines hidden ---
 	.init_seq	=	tcp_v6_init_seq,
 	.init_ts_off	=	tcp_v6_init_ts_off,
 	.send_synack	=	tcp_v6_send_synack,
 };
 
 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 				 int oif, struct tcp_md5sig_key *key, int rst,
-				 u8 tclass, __be32 label, u32 priority)
+				 u8 tclass, __be32 label, u32 priority, u32 txhash)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcphdr *t1;
 	struct sk_buff *buff;
 	struct flowi6 fl6;
 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 	struct sock *ctl_sk = net->ipv6.tcp_sk;
 	unsigned int tot_len = sizeof(struct tcphdr);
--- 74 unchanged lines hidden ---
 	else {
 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 			oif = skb->skb_iif;
 
 		fl6.flowi6_oif = oif;
 	}
 
 	if (sk) {
-		if (sk->sk_state == TCP_TIME_WAIT) {
+		if (sk->sk_state == TCP_TIME_WAIT)
 			mark = inet_twsk(sk)->tw_mark;
-			/* autoflowlabel relies on buff->hash */
-			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
-				     PKT_HASH_TYPE_L4);
-		} else {
+		else
 			mark = sk->sk_mark;
-		}
 		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
 	}
+	if (txhash) {
+		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
+		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
+	}
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
 	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
 	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
 
 	/* Pass a socket to ip6_dst_lookup either it is for RST
 	 * Underlying function will use this to retrieve the network
--- 110 unchanged lines hidden ---
 			priority = inet_twsk(sk)->tw_priority;
 		}
 	} else {
 		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
 			label = ip6_flowlabel(ipv6h);
 	}
 
 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
-			     ipv6_get_dsfield(ipv6h), label, priority);
+			     ipv6_get_dsfield(ipv6h), label, priority, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
 out:
 	rcu_read_unlock();
 #endif
 }
 
 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
-			    __be32 label, u32 priority)
+			    __be32 label, u32 priority, u32 txhash)
 {
 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
-			     tclass, label, priority);
+			     tclass, label, priority, txhash);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
-			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);
+			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
+			tw->tw_txhash);
 
 	inet_twsk_put(tw);
 }
 
 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	int l3index;
--- 10 unchanged lines hidden ---
 	 */
 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
-			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
+			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
+			tcp_rsk(req)->txhash);
 }
 
 
 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
 #ifdef CONFIG_SYN_COOKIES
 	const struct tcphdr *th = tcp_hdr(skb);
 
--- 1128 unchanged lines hidden ---
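
The reply-path change visible above threads a txhash argument from the callers (tcp_v6_timewait_ack() passes tw->tw_txhash, tcp_v6_reqsk_send_ack() passes tcp_rsk(req)->txhash, the reset call passes 0) through tcp_v6_send_ack() into tcp_v6_send_response(), which applies it to the reply skb via skb_set_hash() only when it is non-zero. The toy program below is a minimal sketch of that pattern outside the kernel; struct reply, build_reply() and their fields are hypothetical stand-ins for the skb and are not part of the diff.

/* Toy sketch (not kernel code): carry a precomputed flow hash through a
 * reply builder and apply it only when the caller supplied one, mirroring
 * the "if (txhash)" check added to tcp_v6_send_response() above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct reply {
	uint32_t hash;		/* stands in for skb->hash    */
	int	 l4_hash;	/* stands in for skb->l4_hash */
};

static void build_reply(struct reply *r, uint32_t txhash)
{
	if (txhash) {
		/* counterpart of skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4) */
		r->hash = txhash;
		r->l4_hash = 1;
	}
}

int main(void)
{
	struct reply ack = {0}, rst = {0};

	build_reply(&ack, 0x12345678u);	/* e.g. tw->tw_txhash on the ACK path */
	build_reply(&rst, 0);		/* the RST call passes 0: hash untouched */

	printf("ack: hash=%#" PRIx32 " l4=%d\n", ack.hash, ack.l4_hash);
	printf("rst: hash=%#" PRIx32 " l4=%d\n", rst.hash, rst.l4_hash);
	return 0;
}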