--- tcp_ipv6.c (80c1834fc86c2bbacb54a8fc3c04a8b0066b0996)
+++ tcp_ipv6.c (46d3ceabd8d98ed0ad10f20c595ca784e34786c5)
@@ -1 +1 @@
 /*
  * TCP over IPv6
  * Linux INET6 implementation
  *
  * Authors:
  * Pedro Roque <roque@di.fc.ul.pt>
  *
  * Based on:

--- 263 unchanged lines hidden ---

@@ -272 +272 @@ tcp_v6_connect()
         inet->inet_rcv_saddr = LOOPBACK4_IPV6;

     sk->sk_gso_type = SKB_GSO_TCPV6;
     __ip6_dst_store(sk, dst, NULL, NULL);

     rt = (struct rt6_info *) dst;
     if (tcp_death_row.sysctl_tw_recycle &&
         !tp->rx_opt.ts_recent_stamp &&
-        ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
-        struct inet_peer *peer = rt6_get_peer(rt);
-        /*
-         * VJ's idea. We save last timestamp seen from
-         * the destination in peer table, when entering state
-         * TIME-WAIT * and initialize rx_opt.ts_recent from it,
-         * when trying new connection.
-         */
-        if (peer) {
-            inet_peer_refcheck(peer);
-            if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
-                tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
-                tp->rx_opt.ts_recent = peer->tcp_ts;
-            }
-        }
-    }
+        ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+        tcp_fetch_timewait_stamp(sk, dst);

     icsk->icsk_ext_hdr_len = 0;
     if (np->opt)
         icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                       np->opt->opt_nflen);

     tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

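The second blob replaces the open-coded block above with a single call to tcp_fetch_timewait_stamp(sk, dst); that helper is defined elsewhere and is not part of this diff. What follows is only a user-space model of the behaviour the removed lines implement: seed rx_opt.ts_recent from a per-destination cache, but only when the cached entry is recent enough. All names here (struct peer_cache, struct rx_opts, seed_ts_recent(), the 60-second PAWS_MSL stand-in) are illustrative, not the kernel's.

#include <assert.h>
#include <stdint.h>

#define PAWS_MSL 60    /* stand-in for TCP_PAWS_MSL, in seconds */

/* Last timestamp remembered for a destination (e.g. saved when a prior
 * connection to it entered TIME-WAIT). */
struct peer_cache {
    uint32_t tcp_ts;
    uint32_t tcp_ts_stamp;    /* seconds; 0 = nothing cached */
};

struct rx_opts {
    uint32_t ts_recent;
    uint32_t ts_recent_stamp;
};

/* Mirror of the removed block: reuse the cached timestamp only if it was
 * recorded within the last PAWS_MSL seconds. */
static void seed_ts_recent(struct rx_opts *rx, const struct peer_cache *peer,
                           uint32_t now)
{
    if (peer->tcp_ts_stamp &&
        now - peer->tcp_ts_stamp <= PAWS_MSL) {
        rx->ts_recent_stamp = peer->tcp_ts_stamp;
        rx->ts_recent = peer->tcp_ts;
    }
}

int main(void)
{
    struct peer_cache fresh = { .tcp_ts = 12345, .tcp_ts_stamp = 100 };
    struct peer_cache stale = { .tcp_ts = 12345, .tcp_ts_stamp = 10 };
    struct rx_opts rx = { 0 };

    seed_ts_recent(&rx, &stale, 200);    /* 190 s old: ignored */
    assert(rx.ts_recent == 0);

    seed_ts_recent(&rx, &fresh, 150);    /* 50 s old: reused */
    assert(rx.ts_recent == 12345);
    return 0;
}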

--- 106 unchanged lines hidden ---

@@ -410 +396 @@ tcp_v6_err()
             if (IS_ERR(dst)) {
                 sk->sk_err_soft = -PTR_ERR(dst);
                 goto out;
             }

         } else
             dst_hold(dst);

+        dst->ops->update_pmtu(dst, ntohl(info));
+
         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
             tcp_sync_mss(sk, dst_mtu(dst));
             tcp_simple_retransmit(sk);
         } /* else let the usual retransmit timer handle it */
         dst_release(dst);
         goto out;
     }

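The hunk above works together with the mss_clamp assignment in the tcp_v6_connect() hunk: the socket never advertises an MSS larger than what fits in the IPv6 minimum MTU, and a Packet Too Big report shrinks the cached MSS via tcp_sync_mss() when the new path MTU is smaller than the cached one. A minimal stand-alone check of that arithmetic; the header sizes are hard-coded to the usual 40-byte IPv6 and 20-byte TCP headers, and IPV6_MIN_MTU is assumed to be the standard 1280 bytes.

#include <assert.h>
#include <stdio.h>

#define IPV6_MIN_MTU 1280    /* minimum IPv6 link MTU, as the kernel macro */
#define IPV6_HDR_LEN 40      /* sizeof(struct ipv6hdr), no extension headers */
#define TCP_HDR_LEN  20      /* sizeof(struct tcphdr), no options */

/* Upper bound on the TCP MSS for a given path MTU, mirroring the
 * "IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr)" clamp
 * set up at connect time. */
static int mss_for_mtu(int mtu)
{
    return mtu - IPV6_HDR_LEN - TCP_HDR_LEN;
}

int main(void)
{
    /* The clamp from the connect() hunk: 1280 - 20 - 40 = 1220. */
    assert(mss_for_mtu(IPV6_MIN_MTU) == 1220);

    /* A PKT_TOOBIG report for a 1400-byte path shrinks the usable MSS
     * from 1440 (1500-byte path) to 1340. */
    printf("mss(1500)=%d mss(1400)=%d\n",
           mss_for_mtu(1500), mss_for_mtu(1400));
    return 0;
}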

--- 44 unchanged lines hidden ---

@@ -470 +458 @@ tcp_v6_err(), tcp_v6_send_synack()
     sk->sk_err_soft = err;

 out:
     bh_unlock_sock(sk);
     sock_put(sk);
 }


-static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+                              struct flowi6 *fl6,
+                              struct request_sock *req,
                               struct request_values *rvp,
                               u16 queue_mapping)
 {
     struct inet6_request_sock *treq = inet6_rsk(req);
     struct ipv6_pinfo *np = inet6_sk(sk);
     struct sk_buff * skb;
-    struct ipv6_txoptions *opt = NULL;
-    struct in6_addr * final_p, final;
-    struct flowi6 fl6;
-    struct dst_entry *dst;
-    int err;
+    int err = -ENOMEM;

-    memset(&fl6, 0, sizeof(fl6));
-    fl6.flowi6_proto = IPPROTO_TCP;
-    fl6.daddr = treq->rmt_addr;
-    fl6.saddr = treq->loc_addr;
-    fl6.flowlabel = 0;
-    fl6.flowi6_oif = treq->iif;
-    fl6.flowi6_mark = sk->sk_mark;
-    fl6.fl6_dport = inet_rsk(req)->rmt_port;
-    fl6.fl6_sport = inet_rsk(req)->loc_port;
-    security_req_classify_flow(req, flowi6_to_flowi(&fl6));
-
-    opt = np->opt;
-    final_p = fl6_update_dst(&fl6, opt, &final);
-
-    dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
-    if (IS_ERR(dst)) {
-        err = PTR_ERR(dst);
-        dst = NULL;
+    /* First, grab a route. */
+    if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
         goto done;
-    }
+
     skb = tcp_make_synack(sk, dst, req, rvp);
-    err = -ENOMEM;
+
     if (skb) {
         __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

-        fl6.daddr = treq->rmt_addr;
+        fl6->daddr = treq->rmt_addr;
         skb_set_queue_mapping(skb, queue_mapping);
-        err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+        err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
         err = net_xmit_eval(err);
     }

 done:
-    if (opt && opt != np->opt)
-        sock_kfree_s(sk, opt, opt->tot_len);
-    dst_release(dst);
     return err;
 }

 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
+    struct flowi6 fl6;
+
     TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-    return tcp_v6_send_synack(sk, req, rvp, 0);
+    return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
 }

 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
     kfree_skb(inet6_rsk(req)->pktopts);
 }

 #ifdef CONFIG_TCP_MD5SIG
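The new tcp_v6_send_synack() signature lets the caller hand in a route it already holds: tcp_v6_conn_request() passes whatever dst it may already have looked up for the tw_recycle check (possibly NULL; see the hunk further down), while tcp_v6_rtx_synack() passes NULL and lets the helper do the lookup itself. A minimal sketch of that reuse-or-lookup pattern, with made-up names (struct route, lookup_route(), send_synack()) standing in for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>

struct route { int id; };

/* Stand-in for inet6_csk_route_req(): may fail and return NULL. */
static struct route *lookup_route(void)
{
    struct route *rt = malloc(sizeof(*rt));
    if (rt)
        rt->id = 42;
    return rt;
}

/* Stand-in for tcp_v6_send_synack(): uses the caller's route when one is
 * supplied, otherwise falls back to its own lookup. */
static int send_synack(struct route *rt)
{
    struct route *own = NULL;
    int err = -1;

    if (!rt && (rt = own = lookup_route()) == NULL)
        goto done;        /* no route, give up */

    printf("sending SYN-ACK via route %d\n", rt->id);
    err = 0;
done:
    free(own);            /* release only what we looked up ourselves */
    return err;
}

int main(void)
{
    struct route *cached = lookup_route();

    send_synack(cached);  /* conn_request-style: reuse the caller's route */
    send_synack(NULL);    /* rtx_synack-style: look one up on demand */
    free(cached);
    return 0;
}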

--- 510 unchanged lines hidden ---
@@ -1052 +1021 @@ tcp_v6_conn_request()
     struct tcp_options_received tmp_opt;
     const u8 *hash_location;
     struct request_sock *req;
     struct inet6_request_sock *treq;
     struct ipv6_pinfo *np = inet6_sk(sk);
     struct tcp_sock *tp = tcp_sk(sk);
     __u32 isn = TCP_SKB_CB(skb)->when;
     struct dst_entry *dst = NULL;
+    struct flowi6 fl6;
     bool want_cookie = false;

     if (skb->protocol == htons(ETH_P_IP))
         return tcp_v4_conn_request(sk, skb);

     if (!ipv6_unicast_destination(skb))
         goto drop;


--- 77 unchanged lines hidden ---

@@ -1145 +1115 @@ tcp_v6_conn_request()
     treq->iif = sk->sk_bound_dev_if;

     /* So that link locals have meaning */
     if (!sk->sk_bound_dev_if &&
         ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
         treq->iif = inet6_iif(skb);

     if (!isn) {
-        struct inet_peer *peer = NULL;
-
         if (ipv6_opt_accepted(sk, skb) ||
             np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
             np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
             atomic_inc(&skb->users);
             treq->pktopts = skb;
         }

         if (want_cookie) {

--- 8 unchanged lines hidden ---

@@ -1171 +1139 @@ tcp_v6_conn_request()
          * accepting new connection request.
          *
          * If "isn" is not zero, this request hit alive
          * timewait bucket, so that all the necessary checks
          * are made in the function processing timewait state.
          */
         if (tmp_opt.saw_tstamp &&
             tcp_death_row.sysctl_tw_recycle &&
-            (dst = inet6_csk_route_req(sk, req)) != NULL &&
-            (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
-            ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
-                            &treq->rmt_addr)) {
-            inet_peer_refcheck(peer);
-            if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
-                (s32)(peer->tcp_ts - req->ts_recent) >
-                            TCP_PAWS_WINDOW) {
+            (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
+            if (!tcp_peer_is_proven(req, dst, true)) {
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                 goto drop_and_release;
             }
         }
         /* Kill the following clause, if you dislike this way. */
         else if (!sysctl_tcp_syncookies &&
                  (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                   (sysctl_max_syn_backlog >> 2)) &&
-                 (!peer || !peer->tcp_ts_stamp) &&
-                 (!dst || !dst_metric(dst, RTAX_RTT))) {
+                 !tcp_peer_is_proven(req, dst, false)) {
             /* Without syncookies last quarter of
              * backlog is filled with destinations,
              * proven to be alive.
              * It means that we continue to communicate
              * to destinations, already remembered
              * to the moment of synflood.
              */
             LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",

--- 5 unchanged lines hidden ---

@@ -1210 +1171 @@ tcp_v6_conn_request()
         }
 have_isn:
     tcp_rsk(req)->snt_isn = isn;
     tcp_rsk(req)->snt_synack = tcp_time_stamp;

     if (security_inet_conn_request(sk, skb, req))
         goto drop_and_release;

-    if (tcp_v6_send_synack(sk, req,
+    if (tcp_v6_send_synack(sk, dst, &fl6, req,
                            (struct request_values *)&tmp_ext,
                            skb_get_queue_mapping(skb)) ||
         want_cookie)
         goto drop_and_free;

     inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
     return 0;


--- 10 unchanged lines hidden ---
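Both open-coded checks in the hunk above collapse into tcp_peer_is_proven(req, dst, paws_check), whose implementation is not part of this diff. Below is only a user-space model of the two predicates as the removed code expressed them; struct peer_cache, the PAWS constants, req_ts_recent and the helper name are all stand-ins, not the kernel's types.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAWS_MSL    60    /* stand-in for TCP_PAWS_MSL (seconds) */
#define PAWS_WINDOW 1     /* stand-in for TCP_PAWS_WINDOW */

/* Stand-in for the per-destination state the removed code read from the
 * inetpeer entry and the cached route metrics. */
struct peer_cache {
    uint32_t ts;          /* last timestamp value seen from the peer */
    uint32_t ts_stamp;    /* when it was recorded; 0 = never */
    uint32_t rtt_cached;  /* non-zero if the route has a cached RTT */
};

/* Model of the two checks the helper replaces.
 *
 * paws_check == true  (tw_recycle path): reject only when a fresh cached
 * timestamp is ahead of the timestamp in this SYN by more than the PAWS
 * window, i.e. the request looks like an old duplicate.
 *
 * paws_check == false (syn-flood path): the destination counts as
 * "proven alive" if we remember a timestamp for it or a cached RTT. */
static bool peer_is_proven(const struct peer_cache *p, uint32_t now,
                           uint32_t req_ts_recent, bool paws_check)
{
    if (paws_check) {
        if (p->ts_stamp &&
            now - p->ts_stamp < PAWS_MSL &&
            (int32_t)(p->ts - req_ts_recent) > PAWS_WINDOW)
            return false;
        return true;
    }
    return p->ts_stamp != 0 || p->rtt_cached != 0;
}

int main(void)
{
    struct peer_cache fresh = { .ts = 1000, .ts_stamp = 90, .rtt_cached = 1 };
    struct peer_cache empty = { 0 };

    /* A SYN whose timestamp lags a freshly cached one gets rejected. */
    assert(!peer_is_proven(&fresh, 100, 500, true));
    /* Nothing cached: nothing to object to on the PAWS path ... */
    assert(peer_is_proven(&empty, 100, 500, true));
    /* ... but the destination is not "proven" on the syn-flood path. */
    assert(!peer_is_proven(&empty, 100, 500, false));
    return 0;
}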

@@ -1237 +1198 @@ tcp_v6_syn_recv_sock()
                                           struct dst_entry *dst)
 {
     struct inet6_request_sock *treq;
     struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
     struct tcp6_sock *newtcp6sk;
     struct inet_sock *newinet;
     struct tcp_sock *newtp;
     struct sock *newsk;
-    struct ipv6_txoptions *opt;
 #ifdef CONFIG_TCP_MD5SIG
     struct tcp_md5sig_key *key;
 #endif
+    struct flowi6 fl6;

     if (skb->protocol == htons(ETH_P_IP)) {
         /*
          * v6 mapped
          */

         newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);


--- 40 unchanged lines hidden ---

@@ -1297 +1258 @@ tcp_v6_syn_recv_sock()
            Sync it now.
          */
         tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

         return newsk;
     }

     treq = inet6_rsk(req);
-    opt = np->opt;

     if (sk_acceptq_is_full(sk))
         goto out_overflow;

     if (!dst) {
-        dst = inet6_csk_route_req(sk, req);
+        dst = inet6_csk_route_req(sk, &fl6, req);
         if (!dst)
             goto out;
     }

     newsk = tcp_create_openreq_child(sk, req, skb);
     if (newsk == NULL)
         goto out_nonewsk;


--- 46 unchanged lines hidden ---

@@ -1366 +1326 @@ tcp_v6_syn_recv_sock()
     newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

     /* Clone native IPv6 options from listening socket (if any)

        Yes, keeping reference count would be much more clever,
        but we make one more one thing there: reattach optmem
        to newsk.
      */
-    if (opt) {
-        newnp->opt = ipv6_dup_options(newsk, opt);
-        if (opt != np->opt)
-            sock_kfree_s(sk, opt, opt->tot_len);
-    }
+    if (np->opt)
+        newnp->opt = ipv6_dup_options(newsk, np->opt);

     inet_csk(newsk)->icsk_ext_hdr_len = 0;
     if (newnp->opt)
         inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                              newnp->opt->opt_flen);

     tcp_mtup_init(newsk);
     tcp_sync_mss(newsk, dst_mtu(dst));

--- 30 unchanged lines hidden ---

@@ -1417 +1374 @@ tcp_v6_syn_recv_sock()
     }
     __inet6_hash(newsk, NULL);

     return newsk;

 out_overflow:
     NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
-    if (opt && opt != np->opt)
-        sock_kfree_s(sk, opt, opt->tot_len);
     dst_release(dst);
 out:
     NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
     return NULL;
 }

 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {

--- 294 unchanged lines hidden ---

@@ -1729 +1684 @@
         break;
     case TCP_TW_RST:
         goto no_tcp_socket;
     case TCP_TW_SUCCESS:;
     }
     goto discard_it;
 }

-static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
-{
-    struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
-    struct ipv6_pinfo *np = inet6_sk(sk);
-    struct inet_peer *peer;
-
-    if (!rt ||
-        !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
-        peer = inet_getpeer_v6(&np->daddr, 1);
-        *release_it = true;
-    } else {
-        if (!rt->rt6i_peer)
-            rt6_bind_peer(rt, 1);
-        peer = rt->rt6i_peer;
-        *release_it = false;
-    }
-
-    return peer;
-}
-
-static void *tcp_v6_tw_get_peer(struct sock *sk)
-{
-    const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
-    const struct inet_timewait_sock *tw = inet_twsk(sk);
-
-    if (tw->tw_family == AF_INET)
-        return tcp_v4_tw_get_peer(sk);
-
-    return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
-}
-
 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
     .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
     .twsk_unique = tcp_twsk_unique,
     .twsk_destructor= tcp_twsk_destructor,
-    .twsk_getpeer = tcp_v6_tw_get_peer,
 };

 static const struct inet_connection_sock_af_ops ipv6_specific = {
     .queue_xmit = inet6_csk_xmit,
     .send_check = tcp_v6_send_check,
     .rebuild_header = inet6_sk_rebuild_header,
     .conn_request = tcp_v6_conn_request,
     .syn_recv_sock = tcp_v6_syn_recv_sock,
-    .get_peer = tcp_v6_get_peer,
     .net_header_len = sizeof(struct ipv6hdr),
     .net_frag_header_len = sizeof(struct frag_hdr),
     .setsockopt = ipv6_setsockopt,
     .getsockopt = ipv6_getsockopt,
     .addr2sockaddr = inet6_csk_addr2sockaddr,
     .sockaddr_len = sizeof(struct sockaddr_in6),
     .bind_conflict = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT

--- 15 unchanged lines hidden ---

@@ -1805 +1727 @@
  */

 static const struct inet_connection_sock_af_ops ipv6_mapped = {
     .queue_xmit = ip_queue_xmit,
     .send_check = tcp_v4_send_check,
     .rebuild_header = inet_sk_rebuild_header,
     .conn_request = tcp_v6_conn_request,
     .syn_recv_sock = tcp_v6_syn_recv_sock,
-    .get_peer = tcp_v4_get_peer,
     .net_header_len = sizeof(struct iphdr),
     .setsockopt = ipv6_setsockopt,
     .getsockopt = ipv6_getsockopt,
     .addr2sockaddr = inet6_csk_addr2sockaddr,
     .sockaddr_len = sizeof(struct sockaddr_in6),
     .bind_conflict = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
     .compat_setsockopt = compat_ipv6_setsockopt,

--- 222 unchanged lines hidden ---

@@ -2044 +1965 @@
     .destroy = tcp_v6_destroy_sock,
     .shutdown = tcp_shutdown,
     .setsockopt = tcp_setsockopt,
     .getsockopt = tcp_getsockopt,
     .recvmsg = tcp_recvmsg,
     .sendmsg = tcp_sendmsg,
     .sendpage = tcp_sendpage,
     .backlog_rcv = tcp_v6_do_rcv,
+    .release_cb = tcp_release_cb,
     .hash = tcp_v6_hash,
     .unhash = inet_unhash,
     .get_port = inet_csk_get_port,
     .enter_memory_pressure = tcp_enter_memory_pressure,
     .sockets_allocated = &tcp_sockets_allocated,
     .memory_allocated = &tcp_memory_allocated,
     .memory_pressure = &tcp_memory_pressure,
     .orphan_count = &tcp_orphan_count,

--- 92 unchanged lines hidden ---
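The only change in the final hunk is the new .release_cb hook pointing at tcp_release_cb. The field name and callback suggest a hook invoked when the socket lock is released, so that work noted while the lock was held by user context can be processed at that point; the sketch below models only that shape, with invented names (struct sockish, note_event(), release(), the DEFER_* flags) rather than the kernel's socket-lock API.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of "remember work while the lock is owned, run it on release". */
enum { DEFER_XMIT = 1 << 0, DEFER_ACK = 1 << 1 };

struct sockish {
    bool owned_by_user;
    unsigned deferred;    /* work noted while owned_by_user was true */
};

static void do_xmit(void) { puts("transmit deferred data"); }
static void do_ack(void)  { puts("send deferred ACK"); }

/* Softirq-side path: either do the work now or flag it for later. */
static void note_event(struct sockish *sk, unsigned flag)
{
    if (sk->owned_by_user)
        sk->deferred |= flag;
    else if (flag == DEFER_XMIT)
        do_xmit();
    else
        do_ack();
}

/* Process-side unlock: the point where a release_cb-style hook would run. */
static void release(struct sockish *sk)
{
    unsigned pending = sk->deferred;

    sk->deferred = 0;
    sk->owned_by_user = false;
    if (pending & DEFER_XMIT)
        do_xmit();
    if (pending & DEFER_ACK)
        do_ack();
}

int main(void)
{
    struct sockish sk = { .owned_by_user = true };

    note_event(&sk, DEFER_XMIT);    /* arrives while user owns the lock */
    release(&sk);                   /* flushed here */
    note_event(&sk, DEFER_ACK);     /* lock free: handled immediately */
    return 0;
}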