/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}
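
/*
 * Illustrative note (not part of the original file): both helpers above
 * follow the RFC 6528 scheme, roughly
 *
 *	ISN = M + F(saddr, daddr, sport, dport, secret)
 *
 * where M is a clock and F is a keyed hash over the connection 4-tuple,
 * so distinct 4-tuples get unrelated sequence-number (and timestamp
 * offset) spaces.
 */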

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
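
/*
 * Usage sketch (illustrative only, not part of the original file):
 * tcp_v6_connect() is what a userspace connect(2) on an AF_INET6 TCP
 * socket ends up in. The v4-mapped branch above is taken for
 * destinations such as ::ffff:192.0.2.1, e.g.:
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * after which the socket behaves as IPv4 (icsk_af_ops == &ipv6_mapped).
 */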

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
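
/*
 * Worked example (illustrative): the between() check above accepts an
 * ICMP error only if it quotes an unacknowledged sequence number. With
 * snd_una == 100 and snd_nxt == 200, a quoted seq of 150 is processed,
 * while 250 (or 50) is counted as LINUX_MIB_OUTOFWINDOWICMPS and dropped.
 */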

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}
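
/*
 * Usage sketch (illustrative only, not part of the original file):
 * tcp_v6_parse_md5_keys() serves the TCP_MD5SIG/TCP_MD5SIG_EXT socket
 * options, e.g. from userspace:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer instead.
 */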

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
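
/*
 * Layout sketch (illustrative): the scratch buffer hashed above holds,
 * roughly, the RFC 2460-style pseudo-header followed by the TCP header
 * with its checksum zeroed:
 *
 *	saddr (16 bytes) | daddr (16 bytes) | len (4) | protocol (4)
 *	tcphdr with th->check == 0
 *
 * which matches what the peer feeds into its own RFC 2385 MD5
 * computation, so the two ends can compare digests.
 */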

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
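
/*
 * Worked example (illustrative): with TCPOPT_NOP == 1,
 * TCPOPT_TIMESTAMP == 8 and TCPOLEN_TIMESTAMP == 10, the first option
 * word written above is
 *
 *	htonl((1 << 24) | (1 << 16) | (8 << 8) | 10) == htonl(0x0101080a)
 *
 * i.e. the canonical NOP, NOP, kind=8, len=10 timestamp option prefix,
 * followed by the two 32-bit timestamp values.
 */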

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/* The active side is lost. Try to find the listening socket
		 * via the source port, and then find the md5 key through
		 * the listening socket. We do not lose any security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb),
					    tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
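
/*
 * Worked example (illustrative) for the RFC 7323 receive-window shift
 * applied by the helpers above and below: with rcv_wscale == 7, an
 * advertised window of 262144 bytes goes on the wire as
 * 262144 >> 7 == 2048 in the 16-bit window field, and the peer scales
 * it back up by the same factor.
 */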

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
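
/*
 * Note (illustrative, not from the original file): cookie_v6_check() is
 * only called for segments with SYN clear because a SYN-cookie
 * connection is completed by a bare ACK whose acknowledgment number
 * carries the cookie (the encoded ISN + 1); a segment with SYN set has
 * no cookie to validate.
 */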

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and is backlogged.
	   From the backlog it always ends up here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants
	      to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
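
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * options latched above back Stevens-style IPV6_PKTOPTIONS on TCP
 * sockets. Userspace typically enables, e.g.,
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *
 * and later retrieves the cmsgs for the most recently received segment
 * with getsockopt(fd, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, buf, &len),
 * which is served from np->pktoptions.
 */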

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note: We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
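
/*
 * Design note (illustrative, not from the original file): early demux
 * trades one extra hash lookup per packet for attaching the established
 * socket and its cached rx dst before routing runs. On a stream of
 * in-order segments for one flow, the routing decision is thus amortized
 * to an ifindex comparison (rx_dst_ifindex == skb->skb_iif) plus a dst
 * cookie validation.
 */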

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
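
/*
 * Format sketch (illustrative, hypothetical values): an entry produced
 * above looks roughly like
 *
 *	0: 00000000000000000000000001000000:1F90 ...:0000 0A 00000000:00000000 00:00000000 00000000  1000 0 12345 ...
 *
 * i.e. the ::1 address as four per-word hex fields, port 0x1F90 (8080),
 * state 0A (TCP_LISTEN), then the queue, timer, uid and inode fields in
 * the header order printed by tcp6_seq_show() below.
 */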

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.open		= tcp_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux		=	tcp_v6_early_demux,
	.early_demux_handler	=	tcp_v6_early_demux,
	.handler		=	tcp_v6_rcv,
	.err_handler		=	tcp_v6_err,
	.flags			=	INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}