/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint of data integrity.
	 * Even without PAWS it is safe provided sequence spaces do not
	 * overlap, i.e. at data rates <= 80 Mbit/sec.
	 *
	 * Actually, the idea is close to VJ's one, only the timestamp cache
	 * is held not per host, but per port pair, and the TW bucket is used
	 * as state holder.
	 *
	 * If the TW bucket has already been destroyed we fall back to VJ's
	 * scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
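
/* A note on the check above: reusing a TIME-WAIT port pair for a new
 * outgoing connection is treated as safe when the old connection left a
 * recent timestamp behind (tw_ts_recent_stamp != 0) and either the caller
 * does not require uniqueness (twp == NULL) or net.ipv4.tcp_tw_reuse is
 * enabled and the last timestamp is more than one second old.  PAWS then
 * shields the new connection from stray old segments, and write_seq is
 * started 65535 + 2 bytes beyond the old tw_snd_nxt so the sequence spaces
 * do not overlap even without timestamps.  The knob is exposed as
 * /proc/sys/net/ipv4/tcp_tw_reuse (sysctl net.ipv4.tcp_tw_reuse).
 */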
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and do not release the socket
	 * lock: we select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
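
/* Illustrative userspace sketch (an assumed example, not part of this file's
 * build): tcp_v4_connect() is what ultimately runs under a plain connect()
 * on an AF_INET stream socket, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");
 *
 * An address length shorter than sizeof(struct sockaddr_in) surfaces as
 * EINVAL and a non-AF_INET family as EAFNOSUPPORT, matching the checks at
 * the top of the function.
 */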
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC 1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of the PMTU discovery (RFC 1191) special case:
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC 1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd)
		 */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;
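
		/* Backoff revert (draft-zimmermann-tcp-lcd): a net/host
		 * unreachable that matches the first unacknowledged segment
		 * suggests the retransmission was lost to a routing outage
		 * rather than congestion, so one step of exponential backoff
		 * is undone below and the retransmit timer is re-armed with
		 * whatever time remains; if the reverted RTO has already
		 * elapsed, the segment is retransmitted immediately.
		 */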
		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now.
			 */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
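
/* Checksum note: tcp_v4_check() covers the standard IPv4 pseudo-header
 * (source address, destination address, zero, IPPROTO_TCP, TCP length)
 * plus the TCP header and payload.  With CHECKSUM_PARTIAL the segment is
 * handed to hardware checksum offload: only the folded pseudo-header sum
 * is seeded into th->check, and csum_start/csum_offset tell the device
 * where to start and where to store the final checksum.  Otherwise the
 * header is summed here in software and folded together with skb->csum,
 * which is expected to already hold the payload checksum.
 */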
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So that we build reply only basing on parameters
 *	arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing any security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
703 */ 704 if (sk) 705 arg.bound_dev_if = sk->sk_bound_dev_if; 706 707 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) != 708 offsetof(struct inet_timewait_sock, tw_bound_dev_if)); 709 710 arg.tos = ip_hdr(skb)->tos; 711 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); 712 local_bh_disable(); 713 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), 714 skb, &TCP_SKB_CB(skb)->header.h4.opt, 715 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 716 &arg, arg.iov[0].iov_len); 717 718 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 719 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); 720 local_bh_enable(); 721 722 #ifdef CONFIG_TCP_MD5SIG 723 out: 724 rcu_read_unlock(); 725 #endif 726 } 727 728 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states 729 outside socket context is ugly, certainly. What can I do? 730 */ 731 732 static void tcp_v4_send_ack(const struct sock *sk, 733 struct sk_buff *skb, u32 seq, u32 ack, 734 u32 win, u32 tsval, u32 tsecr, int oif, 735 struct tcp_md5sig_key *key, 736 int reply_flags, u8 tos) 737 { 738 const struct tcphdr *th = tcp_hdr(skb); 739 struct { 740 struct tcphdr th; 741 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) 742 #ifdef CONFIG_TCP_MD5SIG 743 + (TCPOLEN_MD5SIG_ALIGNED >> 2) 744 #endif 745 ]; 746 } rep; 747 struct net *net = sock_net(sk); 748 struct ip_reply_arg arg; 749 750 memset(&rep.th, 0, sizeof(struct tcphdr)); 751 memset(&arg, 0, sizeof(arg)); 752 753 arg.iov[0].iov_base = (unsigned char *)&rep; 754 arg.iov[0].iov_len = sizeof(rep.th); 755 if (tsecr) { 756 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 757 (TCPOPT_TIMESTAMP << 8) | 758 TCPOLEN_TIMESTAMP); 759 rep.opt[1] = htonl(tsval); 760 rep.opt[2] = htonl(tsecr); 761 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; 762 } 763 764 /* Swap the send and the receive. */ 765 rep.th.dest = th->source; 766 rep.th.source = th->dest; 767 rep.th.doff = arg.iov[0].iov_len / 4; 768 rep.th.seq = htonl(seq); 769 rep.th.ack_seq = htonl(ack); 770 rep.th.ack = 1; 771 rep.th.window = htons(win); 772 773 #ifdef CONFIG_TCP_MD5SIG 774 if (key) { 775 int offset = (tsecr) ? 3 : 0; 776 777 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | 778 (TCPOPT_NOP << 16) | 779 (TCPOPT_MD5SIG << 8) | 780 TCPOLEN_MD5SIG); 781 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; 782 rep.th.doff = arg.iov[0].iov_len/4; 783 784 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset], 785 key, ip_hdr(skb)->saddr, 786 ip_hdr(skb)->daddr, &rep.th); 787 } 788 #endif 789 arg.flags = reply_flags; 790 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 791 ip_hdr(skb)->saddr, /* XXX */ 792 arg.iov[0].iov_len, IPPROTO_TCP, 0); 793 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 794 if (oif) 795 arg.bound_dev_if = oif; 796 arg.tos = tos; 797 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); 798 local_bh_disable(); 799 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), 800 skb, &TCP_SKB_CB(skb)->header.h4.opt, 801 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 802 &arg, arg.iov[0].iov_len); 803 804 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 805 local_bh_enable(); 806 } 807 808 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) 809 { 810 struct inet_timewait_sock *tw = inet_twsk(sk); 811 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 812 813 tcp_v4_send_ack(sk, skb, 814 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 815 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 816 tcp_time_stamp_raw() + tcptw->tw_ts_offset, 817 tcptw->tw_ts_recent, 818 tw->tw_bound_dev_if, 819 tcp_twsk_md5_key(tcptw), 820 tw->tw_transparent ? 
IP_REPLY_ARG_NOSRCCHECK : 0, 821 tw->tw_tos 822 ); 823 824 inet_twsk_put(tw); 825 } 826 827 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, 828 struct request_sock *req) 829 { 830 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV 831 * sk->sk_state == TCP_SYN_RECV -> for Fast Open. 832 */ 833 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : 834 tcp_sk(sk)->snd_nxt; 835 836 /* RFC 7323 2.3 837 * The window field (SEG.WND) of every outgoing segment, with the 838 * exception of <SYN> segments, MUST be right-shifted by 839 * Rcv.Wind.Shift bits: 840 */ 841 tcp_v4_send_ack(sk, skb, seq, 842 tcp_rsk(req)->rcv_nxt, 843 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, 844 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 845 req->ts_recent, 846 0, 847 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 848 AF_INET), 849 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 850 ip_hdr(skb)->tos); 851 } 852 853 /* 854 * Send a SYN-ACK after having received a SYN. 855 * This still operates on a request_sock only, not on a big 856 * socket. 857 */ 858 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, 859 struct flowi *fl, 860 struct request_sock *req, 861 struct tcp_fastopen_cookie *foc, 862 enum tcp_synack_type synack_type) 863 { 864 const struct inet_request_sock *ireq = inet_rsk(req); 865 struct flowi4 fl4; 866 int err = -1; 867 struct sk_buff *skb; 868 869 /* First, grab a route. */ 870 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 871 return -1; 872 873 skb = tcp_make_synack(sk, dst, req, foc, synack_type); 874 875 if (skb) { 876 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 877 878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 879 ireq->ir_rmt_addr, 880 ireq->opt); 881 err = net_xmit_eval(err); 882 } 883 884 return err; 885 } 886 887 /* 888 * IPv4 request_sock destructor. 889 */ 890 static void tcp_v4_reqsk_destructor(struct request_sock *req) 891 { 892 kfree(inet_rsk(req)->opt); 893 } 894 895 #ifdef CONFIG_TCP_MD5SIG 896 /* 897 * RFC2385 MD5 checksumming requires a mapping of 898 * IP address->MD5 Key. 899 * We need to maintain these in the sk structure. 900 */ 901 902 /* Find the Key structure for an address. 
*/ 903 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, 904 const union tcp_md5_addr *addr, 905 int family) 906 { 907 const struct tcp_sock *tp = tcp_sk(sk); 908 struct tcp_md5sig_key *key; 909 const struct tcp_md5sig_info *md5sig; 910 __be32 mask; 911 struct tcp_md5sig_key *best_match = NULL; 912 bool match; 913 914 /* caller either holds rcu_read_lock() or socket lock */ 915 md5sig = rcu_dereference_check(tp->md5sig_info, 916 lockdep_sock_is_held(sk)); 917 if (!md5sig) 918 return NULL; 919 920 hlist_for_each_entry_rcu(key, &md5sig->head, node) { 921 if (key->family != family) 922 continue; 923 924 if (family == AF_INET) { 925 mask = inet_make_mask(key->prefixlen); 926 match = (key->addr.a4.s_addr & mask) == 927 (addr->a4.s_addr & mask); 928 #if IS_ENABLED(CONFIG_IPV6) 929 } else if (family == AF_INET6) { 930 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6, 931 key->prefixlen); 932 #endif 933 } else { 934 match = false; 935 } 936 937 if (match && (!best_match || 938 key->prefixlen > best_match->prefixlen)) 939 best_match = key; 940 } 941 return best_match; 942 } 943 EXPORT_SYMBOL(tcp_md5_do_lookup); 944 945 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk, 946 const union tcp_md5_addr *addr, 947 int family, u8 prefixlen) 948 { 949 const struct tcp_sock *tp = tcp_sk(sk); 950 struct tcp_md5sig_key *key; 951 unsigned int size = sizeof(struct in_addr); 952 const struct tcp_md5sig_info *md5sig; 953 954 /* caller either holds rcu_read_lock() or socket lock */ 955 md5sig = rcu_dereference_check(tp->md5sig_info, 956 lockdep_sock_is_held(sk)); 957 if (!md5sig) 958 return NULL; 959 #if IS_ENABLED(CONFIG_IPV6) 960 if (family == AF_INET6) 961 size = sizeof(struct in6_addr); 962 #endif 963 hlist_for_each_entry_rcu(key, &md5sig->head, node) { 964 if (key->family != family) 965 continue; 966 if (!memcmp(&key->addr, addr, size) && 967 key->prefixlen == prefixlen) 968 return key; 969 } 970 return NULL; 971 } 972 973 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, 974 const struct sock *addr_sk) 975 { 976 const union tcp_md5_addr *addr; 977 978 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr; 979 return tcp_md5_do_lookup(sk, addr, AF_INET); 980 } 981 EXPORT_SYMBOL(tcp_v4_md5_lookup); 982 983 /* This can be called on a newly created socket, from other files */ 984 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, 985 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen, 986 gfp_t gfp) 987 { 988 /* Add Key to the list */ 989 struct tcp_md5sig_key *key; 990 struct tcp_sock *tp = tcp_sk(sk); 991 struct tcp_md5sig_info *md5sig; 992 993 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen); 994 if (key) { 995 /* Pre-existing entry - just update that one. 
*/ 996 memcpy(key->key, newkey, newkeylen); 997 key->keylen = newkeylen; 998 return 0; 999 } 1000 1001 md5sig = rcu_dereference_protected(tp->md5sig_info, 1002 lockdep_sock_is_held(sk)); 1003 if (!md5sig) { 1004 md5sig = kmalloc(sizeof(*md5sig), gfp); 1005 if (!md5sig) 1006 return -ENOMEM; 1007 1008 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 1009 INIT_HLIST_HEAD(&md5sig->head); 1010 rcu_assign_pointer(tp->md5sig_info, md5sig); 1011 } 1012 1013 key = sock_kmalloc(sk, sizeof(*key), gfp); 1014 if (!key) 1015 return -ENOMEM; 1016 if (!tcp_alloc_md5sig_pool()) { 1017 sock_kfree_s(sk, key, sizeof(*key)); 1018 return -ENOMEM; 1019 } 1020 1021 memcpy(key->key, newkey, newkeylen); 1022 key->keylen = newkeylen; 1023 key->family = family; 1024 key->prefixlen = prefixlen; 1025 memcpy(&key->addr, addr, 1026 (family == AF_INET6) ? sizeof(struct in6_addr) : 1027 sizeof(struct in_addr)); 1028 hlist_add_head_rcu(&key->node, &md5sig->head); 1029 return 0; 1030 } 1031 EXPORT_SYMBOL(tcp_md5_do_add); 1032 1033 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family, 1034 u8 prefixlen) 1035 { 1036 struct tcp_md5sig_key *key; 1037 1038 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen); 1039 if (!key) 1040 return -ENOENT; 1041 hlist_del_rcu(&key->node); 1042 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); 1043 kfree_rcu(key, rcu); 1044 return 0; 1045 } 1046 EXPORT_SYMBOL(tcp_md5_do_del); 1047 1048 static void tcp_clear_md5_list(struct sock *sk) 1049 { 1050 struct tcp_sock *tp = tcp_sk(sk); 1051 struct tcp_md5sig_key *key; 1052 struct hlist_node *n; 1053 struct tcp_md5sig_info *md5sig; 1054 1055 md5sig = rcu_dereference_protected(tp->md5sig_info, 1); 1056 1057 hlist_for_each_entry_safe(key, n, &md5sig->head, node) { 1058 hlist_del_rcu(&key->node); 1059 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); 1060 kfree_rcu(key, rcu); 1061 } 1062 } 1063 1064 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, 1065 char __user *optval, int optlen) 1066 { 1067 struct tcp_md5sig cmd; 1068 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; 1069 u8 prefixlen = 32; 1070 1071 if (optlen < sizeof(cmd)) 1072 return -EINVAL; 1073 1074 if (copy_from_user(&cmd, optval, sizeof(cmd))) 1075 return -EFAULT; 1076 1077 if (sin->sin_family != AF_INET) 1078 return -EINVAL; 1079 1080 if (optname == TCP_MD5SIG_EXT && 1081 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) { 1082 prefixlen = cmd.tcpm_prefixlen; 1083 if (prefixlen > 32) 1084 return -EINVAL; 1085 } 1086 1087 if (!cmd.tcpm_keylen) 1088 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, 1089 AF_INET, prefixlen); 1090 1091 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 1092 return -EINVAL; 1093 1094 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, 1095 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen, 1096 GFP_KERNEL); 1097 } 1098 1099 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp, 1100 __be32 daddr, __be32 saddr, 1101 const struct tcphdr *th, int nbytes) 1102 { 1103 struct tcp4_pseudohdr *bp; 1104 struct scatterlist sg; 1105 struct tcphdr *_th; 1106 1107 bp = hp->scratch; 1108 bp->saddr = saddr; 1109 bp->daddr = daddr; 1110 bp->pad = 0; 1111 bp->protocol = IPPROTO_TCP; 1112 bp->len = cpu_to_be16(nbytes); 1113 1114 _th = (struct tcphdr *)(bp + 1); 1115 memcpy(_th, th, sizeof(*th)); 1116 _th->check = 0; 1117 1118 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); 1119 ahash_request_set_crypt(hp->md5_req, &sg, NULL, 1120 sizeof(*bp) + sizeof(*th)); 1121 return crypto_ahash_update(hp->md5_req); 1122 
} 1123 1124 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, 1125 __be32 daddr, __be32 saddr, const struct tcphdr *th) 1126 { 1127 struct tcp_md5sig_pool *hp; 1128 struct ahash_request *req; 1129 1130 hp = tcp_get_md5sig_pool(); 1131 if (!hp) 1132 goto clear_hash_noput; 1133 req = hp->md5_req; 1134 1135 if (crypto_ahash_init(req)) 1136 goto clear_hash; 1137 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2)) 1138 goto clear_hash; 1139 if (tcp_md5_hash_key(hp, key)) 1140 goto clear_hash; 1141 ahash_request_set_crypt(req, NULL, md5_hash, 0); 1142 if (crypto_ahash_final(req)) 1143 goto clear_hash; 1144 1145 tcp_put_md5sig_pool(); 1146 return 0; 1147 1148 clear_hash: 1149 tcp_put_md5sig_pool(); 1150 clear_hash_noput: 1151 memset(md5_hash, 0, 16); 1152 return 1; 1153 } 1154 1155 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, 1156 const struct sock *sk, 1157 const struct sk_buff *skb) 1158 { 1159 struct tcp_md5sig_pool *hp; 1160 struct ahash_request *req; 1161 const struct tcphdr *th = tcp_hdr(skb); 1162 __be32 saddr, daddr; 1163 1164 if (sk) { /* valid for establish/request sockets */ 1165 saddr = sk->sk_rcv_saddr; 1166 daddr = sk->sk_daddr; 1167 } else { 1168 const struct iphdr *iph = ip_hdr(skb); 1169 saddr = iph->saddr; 1170 daddr = iph->daddr; 1171 } 1172 1173 hp = tcp_get_md5sig_pool(); 1174 if (!hp) 1175 goto clear_hash_noput; 1176 req = hp->md5_req; 1177 1178 if (crypto_ahash_init(req)) 1179 goto clear_hash; 1180 1181 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len)) 1182 goto clear_hash; 1183 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) 1184 goto clear_hash; 1185 if (tcp_md5_hash_key(hp, key)) 1186 goto clear_hash; 1187 ahash_request_set_crypt(req, NULL, md5_hash, 0); 1188 if (crypto_ahash_final(req)) 1189 goto clear_hash; 1190 1191 tcp_put_md5sig_pool(); 1192 return 0; 1193 1194 clear_hash: 1195 tcp_put_md5sig_pool(); 1196 clear_hash_noput: 1197 memset(md5_hash, 0, 16); 1198 return 1; 1199 } 1200 EXPORT_SYMBOL(tcp_v4_md5_hash_skb); 1201 1202 #endif 1203 1204 /* Called with rcu_read_lock() */ 1205 static bool tcp_v4_inbound_md5_hash(const struct sock *sk, 1206 const struct sk_buff *skb) 1207 { 1208 #ifdef CONFIG_TCP_MD5SIG 1209 /* 1210 * This gets called for each TCP segment that arrives 1211 * so we want to be efficient. 1212 * We have 3 drop cases: 1213 * o No MD5 hash and one expected. 1214 * o MD5 hash and we're not expecting one. 1215 * o MD5 hash and its wrong. 1216 */ 1217 const __u8 *hash_location = NULL; 1218 struct tcp_md5sig_key *hash_expected; 1219 const struct iphdr *iph = ip_hdr(skb); 1220 const struct tcphdr *th = tcp_hdr(skb); 1221 int genhash; 1222 unsigned char newhash[16]; 1223 1224 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, 1225 AF_INET); 1226 hash_location = tcp_parse_md5sig_option(th); 1227 1228 /* We've parsed the options - do we have a hash? */ 1229 if (!hash_expected && !hash_location) 1230 return false; 1231 1232 if (hash_expected && !hash_location) { 1233 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 1234 return true; 1235 } 1236 1237 if (!hash_expected && hash_location) { 1238 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 1239 return true; 1240 } 1241 1242 /* Okay, so this is hash_expected and hash_location - 1243 * so we need to calculate the checksum. 
1244 */ 1245 genhash = tcp_v4_md5_hash_skb(newhash, 1246 hash_expected, 1247 NULL, skb); 1248 1249 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1250 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 1251 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", 1252 &iph->saddr, ntohs(th->source), 1253 &iph->daddr, ntohs(th->dest), 1254 genhash ? " tcp_v4_calc_md5_hash failed" 1255 : ""); 1256 return true; 1257 } 1258 return false; 1259 #endif 1260 return false; 1261 } 1262 1263 static void tcp_v4_init_req(struct request_sock *req, 1264 const struct sock *sk_listener, 1265 struct sk_buff *skb) 1266 { 1267 struct inet_request_sock *ireq = inet_rsk(req); 1268 1269 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 1270 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 1271 ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb); 1272 } 1273 1274 static struct dst_entry *tcp_v4_route_req(const struct sock *sk, 1275 struct flowi *fl, 1276 const struct request_sock *req) 1277 { 1278 return inet_csk_route_req(sk, &fl->u.ip4, req); 1279 } 1280 1281 struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1282 .family = PF_INET, 1283 .obj_size = sizeof(struct tcp_request_sock), 1284 .rtx_syn_ack = tcp_rtx_synack, 1285 .send_ack = tcp_v4_reqsk_send_ack, 1286 .destructor = tcp_v4_reqsk_destructor, 1287 .send_reset = tcp_v4_send_reset, 1288 .syn_ack_timeout = tcp_syn_ack_timeout, 1289 }; 1290 1291 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { 1292 .mss_clamp = TCP_MSS_DEFAULT, 1293 #ifdef CONFIG_TCP_MD5SIG 1294 .req_md5_lookup = tcp_v4_md5_lookup, 1295 .calc_md5_hash = tcp_v4_md5_hash_skb, 1296 #endif 1297 .init_req = tcp_v4_init_req, 1298 #ifdef CONFIG_SYN_COOKIES 1299 .cookie_init_seq = cookie_v4_init_sequence, 1300 #endif 1301 .route_req = tcp_v4_route_req, 1302 .init_seq = tcp_v4_init_seq, 1303 .init_ts_off = tcp_v4_init_ts_off, 1304 .send_synack = tcp_v4_send_synack, 1305 }; 1306 1307 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1308 { 1309 /* Never answer to SYNs send to broadcast or multicast */ 1310 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1311 goto drop; 1312 1313 return tcp_conn_request(&tcp_request_sock_ops, 1314 &tcp_request_sock_ipv4_ops, sk, skb); 1315 1316 drop: 1317 tcp_listendrop(sk); 1318 return 0; 1319 } 1320 EXPORT_SYMBOL(tcp_v4_conn_request); 1321 1322 1323 /* 1324 * The three way handshake has completed - we got a valid synack - 1325 * now create the new socket. 
1326 */ 1327 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, 1328 struct request_sock *req, 1329 struct dst_entry *dst, 1330 struct request_sock *req_unhash, 1331 bool *own_req) 1332 { 1333 struct inet_request_sock *ireq; 1334 struct inet_sock *newinet; 1335 struct tcp_sock *newtp; 1336 struct sock *newsk; 1337 #ifdef CONFIG_TCP_MD5SIG 1338 struct tcp_md5sig_key *key; 1339 #endif 1340 struct ip_options_rcu *inet_opt; 1341 1342 if (sk_acceptq_is_full(sk)) 1343 goto exit_overflow; 1344 1345 newsk = tcp_create_openreq_child(sk, req, skb); 1346 if (!newsk) 1347 goto exit_nonewsk; 1348 1349 newsk->sk_gso_type = SKB_GSO_TCPV4; 1350 inet_sk_rx_dst_set(newsk, skb); 1351 1352 newtp = tcp_sk(newsk); 1353 newinet = inet_sk(newsk); 1354 ireq = inet_rsk(req); 1355 sk_daddr_set(newsk, ireq->ir_rmt_addr); 1356 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 1357 newsk->sk_bound_dev_if = ireq->ir_iif; 1358 newinet->inet_saddr = ireq->ir_loc_addr; 1359 inet_opt = ireq->opt; 1360 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1361 ireq->opt = NULL; 1362 newinet->mc_index = inet_iif(skb); 1363 newinet->mc_ttl = ip_hdr(skb)->ttl; 1364 newinet->rcv_tos = ip_hdr(skb)->tos; 1365 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1366 if (inet_opt) 1367 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1368 newinet->inet_id = newtp->write_seq ^ jiffies; 1369 1370 if (!dst) { 1371 dst = inet_csk_route_child_sock(sk, newsk, req); 1372 if (!dst) 1373 goto put_and_exit; 1374 } else { 1375 /* syncookie case : see end of cookie_v4_check() */ 1376 } 1377 sk_setup_caps(newsk, dst); 1378 1379 tcp_ca_openreq_child(newsk, dst); 1380 1381 tcp_sync_mss(newsk, dst_mtu(dst)); 1382 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); 1383 1384 tcp_initialize_rcv_mss(newsk); 1385 1386 #ifdef CONFIG_TCP_MD5SIG 1387 /* Copy over the MD5 key from the original socket */ 1388 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, 1389 AF_INET); 1390 if (key) { 1391 /* 1392 * We're using one, so create a matching key 1393 * on the newsk structure. If we fail to get 1394 * memory, then we end up not copying the key 1395 * across. Shucks. 1396 */ 1397 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr, 1398 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC); 1399 sk_nocaps_add(newsk, NETIF_F_GSO_MASK); 1400 } 1401 #endif 1402 1403 if (__inet_inherit_port(sk, newsk) < 0) 1404 goto put_and_exit; 1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 1406 if (*own_req) 1407 tcp_move_syn(newtp, req); 1408 1409 return newsk; 1410 1411 exit_overflow: 1412 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1413 exit_nonewsk: 1414 dst_release(dst); 1415 exit: 1416 tcp_listendrop(sk); 1417 return NULL; 1418 put_and_exit: 1419 inet_csk_prepare_forced_close(newsk); 1420 tcp_done(newsk); 1421 goto exit; 1422 } 1423 EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 1424 1425 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb) 1426 { 1427 #ifdef CONFIG_SYN_COOKIES 1428 const struct tcphdr *th = tcp_hdr(skb); 1429 1430 if (!th->syn) 1431 sk = cookie_v4_check(sk, skb); 1432 #endif 1433 return sk; 1434 } 1435 1436 /* The socket must have it's spinlock held when we get 1437 * here, unless it is a TCP_LISTEN socket. 1438 * 1439 * We have a potential double-lock case here, so even when 1440 * doing backlog processing we use the BH locking scheme. 1441 * This is because we cannot sleep with the original spinlock 1442 * held. 
1443 */ 1444 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) 1445 { 1446 struct sock *rsk; 1447 1448 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1449 struct dst_entry *dst = sk->sk_rx_dst; 1450 1451 sock_rps_save_rxhash(sk, skb); 1452 sk_mark_napi_id(sk, skb); 1453 if (dst) { 1454 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || 1455 !dst->ops->check(dst, 0)) { 1456 dst_release(dst); 1457 sk->sk_rx_dst = NULL; 1458 } 1459 } 1460 tcp_rcv_established(sk, skb, tcp_hdr(skb)); 1461 return 0; 1462 } 1463 1464 if (tcp_checksum_complete(skb)) 1465 goto csum_err; 1466 1467 if (sk->sk_state == TCP_LISTEN) { 1468 struct sock *nsk = tcp_v4_cookie_check(sk, skb); 1469 1470 if (!nsk) 1471 goto discard; 1472 if (nsk != sk) { 1473 if (tcp_child_process(sk, nsk, skb)) { 1474 rsk = nsk; 1475 goto reset; 1476 } 1477 return 0; 1478 } 1479 } else 1480 sock_rps_save_rxhash(sk, skb); 1481 1482 if (tcp_rcv_state_process(sk, skb)) { 1483 rsk = sk; 1484 goto reset; 1485 } 1486 return 0; 1487 1488 reset: 1489 tcp_v4_send_reset(rsk, skb); 1490 discard: 1491 kfree_skb(skb); 1492 /* Be careful here. If this function gets more complicated and 1493 * gcc suffers from register pressure on the x86, sk (in %ebx) 1494 * might be destroyed here. This current version compiles correctly, 1495 * but you have been warned. 1496 */ 1497 return 0; 1498 1499 csum_err: 1500 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 1501 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 1502 goto discard; 1503 } 1504 EXPORT_SYMBOL(tcp_v4_do_rcv); 1505 1506 int tcp_v4_early_demux(struct sk_buff *skb) 1507 { 1508 const struct iphdr *iph; 1509 const struct tcphdr *th; 1510 struct sock *sk; 1511 1512 if (skb->pkt_type != PACKET_HOST) 1513 return 0; 1514 1515 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) 1516 return 0; 1517 1518 iph = ip_hdr(skb); 1519 th = tcp_hdr(skb); 1520 1521 if (th->doff < sizeof(struct tcphdr) / 4) 1522 return 0; 1523 1524 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, 1525 iph->saddr, th->source, 1526 iph->daddr, ntohs(th->dest), 1527 skb->skb_iif, inet_sdif(skb)); 1528 if (sk) { 1529 skb->sk = sk; 1530 skb->destructor = sock_edemux; 1531 if (sk_fullsock(sk)) { 1532 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); 1533 1534 if (dst) 1535 dst = dst_check(dst, 0); 1536 if (dst && 1537 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) 1538 skb_dst_set_noref(skb, dst); 1539 } 1540 } 1541 return 0; 1542 } 1543 1544 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) 1545 { 1546 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; 1547 1548 /* Only socket owner can try to collapse/prune rx queues 1549 * to reduce memory overhead, so add a little headroom here. 1550 * Few sockets backlog are possibly concurrently non empty. 1551 */ 1552 limit += 64*1024; 1553 1554 /* In case all data was pulled from skb frags (in __pskb_pull_tail()), 1555 * we can fix skb->truesize to its real value to avoid future drops. 1556 * This is valid because skb is not yet charged to the socket. 1557 * It has been noticed pure SACK packets were sometimes dropped 1558 * (if cooked by drivers without copybreak feature). 
1559 */ 1560 skb_condense(skb); 1561 1562 if (unlikely(sk_add_backlog(sk, skb, limit))) { 1563 bh_unlock_sock(sk); 1564 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); 1565 return true; 1566 } 1567 return false; 1568 } 1569 EXPORT_SYMBOL(tcp_add_backlog); 1570 1571 int tcp_filter(struct sock *sk, struct sk_buff *skb) 1572 { 1573 struct tcphdr *th = (struct tcphdr *)skb->data; 1574 unsigned int eaten = skb->len; 1575 int err; 1576 1577 err = sk_filter_trim_cap(sk, skb, th->doff * 4); 1578 if (!err) { 1579 eaten -= skb->len; 1580 TCP_SKB_CB(skb)->end_seq -= eaten; 1581 } 1582 return err; 1583 } 1584 EXPORT_SYMBOL(tcp_filter); 1585 1586 /* 1587 * From tcp_input.c 1588 */ 1589 1590 int tcp_v4_rcv(struct sk_buff *skb) 1591 { 1592 struct net *net = dev_net(skb->dev); 1593 int sdif = inet_sdif(skb); 1594 const struct iphdr *iph; 1595 const struct tcphdr *th; 1596 bool refcounted; 1597 struct sock *sk; 1598 int ret; 1599 1600 if (skb->pkt_type != PACKET_HOST) 1601 goto discard_it; 1602 1603 /* Count it even if it's bad */ 1604 __TCP_INC_STATS(net, TCP_MIB_INSEGS); 1605 1606 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 1607 goto discard_it; 1608 1609 th = (const struct tcphdr *)skb->data; 1610 1611 if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) 1612 goto bad_packet; 1613 if (!pskb_may_pull(skb, th->doff * 4)) 1614 goto discard_it; 1615 1616 /* An explanation is required here, I think. 1617 * Packet length and doff are validated by header prediction, 1618 * provided case of th->doff==0 is eliminated. 1619 * So, we defer the checks. */ 1620 1621 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) 1622 goto csum_error; 1623 1624 th = (const struct tcphdr *)skb->data; 1625 iph = ip_hdr(skb); 1626 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() 1627 * barrier() makes sure compiler wont play fool^Waliasing games. 1628 */ 1629 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), 1630 sizeof(struct inet_skb_parm)); 1631 barrier(); 1632 1633 TCP_SKB_CB(skb)->seq = ntohl(th->seq); 1634 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + 1635 skb->len - th->doff * 4); 1636 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1637 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); 1638 TCP_SKB_CB(skb)->tcp_tw_isn = 0; 1639 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); 1640 TCP_SKB_CB(skb)->sacked = 0; 1641 TCP_SKB_CB(skb)->has_rxtstamp = 1642 skb->tstamp || skb_hwtstamps(skb)->hwtstamp; 1643 1644 lookup: 1645 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, 1646 th->dest, sdif, &refcounted); 1647 if (!sk) 1648 goto no_tcp_socket; 1649 1650 process: 1651 if (sk->sk_state == TCP_TIME_WAIT) 1652 goto do_time_wait; 1653 1654 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1655 struct request_sock *req = inet_reqsk(sk); 1656 struct sock *nsk; 1657 1658 sk = req->rsk_listener; 1659 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { 1660 sk_drops_add(sk, skb); 1661 reqsk_put(req); 1662 goto discard_it; 1663 } 1664 if (unlikely(sk->sk_state != TCP_LISTEN)) { 1665 inet_csk_reqsk_queue_drop_and_put(sk, req); 1666 goto lookup; 1667 } 1668 /* We own a reference on the listener, increase it again 1669 * as we might lose it too soon. 
1670 */ 1671 sock_hold(sk); 1672 refcounted = true; 1673 nsk = NULL; 1674 if (!tcp_filter(sk, skb)) 1675 nsk = tcp_check_req(sk, skb, req, false); 1676 if (!nsk) { 1677 reqsk_put(req); 1678 goto discard_and_relse; 1679 } 1680 if (nsk == sk) { 1681 reqsk_put(req); 1682 } else if (tcp_child_process(sk, nsk, skb)) { 1683 tcp_v4_send_reset(nsk, skb); 1684 goto discard_and_relse; 1685 } else { 1686 sock_put(sk); 1687 return 0; 1688 } 1689 } 1690 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { 1691 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); 1692 goto discard_and_relse; 1693 } 1694 1695 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 1696 goto discard_and_relse; 1697 1698 if (tcp_v4_inbound_md5_hash(sk, skb)) 1699 goto discard_and_relse; 1700 1701 nf_reset(skb); 1702 1703 if (tcp_filter(sk, skb)) 1704 goto discard_and_relse; 1705 th = (const struct tcphdr *)skb->data; 1706 iph = ip_hdr(skb); 1707 1708 skb->dev = NULL; 1709 1710 if (sk->sk_state == TCP_LISTEN) { 1711 ret = tcp_v4_do_rcv(sk, skb); 1712 goto put_and_return; 1713 } 1714 1715 sk_incoming_cpu_update(sk); 1716 1717 bh_lock_sock_nested(sk); 1718 tcp_segs_in(tcp_sk(sk), skb); 1719 ret = 0; 1720 if (!sock_owned_by_user(sk)) { 1721 ret = tcp_v4_do_rcv(sk, skb); 1722 } else if (tcp_add_backlog(sk, skb)) { 1723 goto discard_and_relse; 1724 } 1725 bh_unlock_sock(sk); 1726 1727 put_and_return: 1728 if (refcounted) 1729 sock_put(sk); 1730 1731 return ret; 1732 1733 no_tcp_socket: 1734 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 1735 goto discard_it; 1736 1737 if (tcp_checksum_complete(skb)) { 1738 csum_error: 1739 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); 1740 bad_packet: 1741 __TCP_INC_STATS(net, TCP_MIB_INERRS); 1742 } else { 1743 tcp_v4_send_reset(NULL, skb); 1744 } 1745 1746 discard_it: 1747 /* Discard frame. 
*/ 1748 kfree_skb(skb); 1749 return 0; 1750 1751 discard_and_relse: 1752 sk_drops_add(sk, skb); 1753 if (refcounted) 1754 sock_put(sk); 1755 goto discard_it; 1756 1757 do_time_wait: 1758 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1759 inet_twsk_put(inet_twsk(sk)); 1760 goto discard_it; 1761 } 1762 1763 if (tcp_checksum_complete(skb)) { 1764 inet_twsk_put(inet_twsk(sk)); 1765 goto csum_error; 1766 } 1767 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1768 case TCP_TW_SYN: { 1769 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev), 1770 &tcp_hashinfo, skb, 1771 __tcp_hdrlen(th), 1772 iph->saddr, th->source, 1773 iph->daddr, th->dest, 1774 inet_iif(skb), 1775 sdif); 1776 if (sk2) { 1777 inet_twsk_deschedule_put(inet_twsk(sk)); 1778 sk = sk2; 1779 refcounted = false; 1780 goto process; 1781 } 1782 /* Fall through to ACK */ 1783 } 1784 case TCP_TW_ACK: 1785 tcp_v4_timewait_ack(sk, skb); 1786 break; 1787 case TCP_TW_RST: 1788 tcp_v4_send_reset(sk, skb); 1789 inet_twsk_deschedule_put(inet_twsk(sk)); 1790 goto discard_it; 1791 case TCP_TW_SUCCESS:; 1792 } 1793 goto discard_it; 1794 } 1795 1796 static struct timewait_sock_ops tcp_timewait_sock_ops = { 1797 .twsk_obj_size = sizeof(struct tcp_timewait_sock), 1798 .twsk_unique = tcp_twsk_unique, 1799 .twsk_destructor= tcp_twsk_destructor, 1800 }; 1801 1802 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) 1803 { 1804 struct dst_entry *dst = skb_dst(skb); 1805 1806 if (dst && dst_hold_safe(dst)) { 1807 sk->sk_rx_dst = dst; 1808 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 1809 } 1810 } 1811 EXPORT_SYMBOL(inet_sk_rx_dst_set); 1812 1813 const struct inet_connection_sock_af_ops ipv4_specific = { 1814 .queue_xmit = ip_queue_xmit, 1815 .send_check = tcp_v4_send_check, 1816 .rebuild_header = inet_sk_rebuild_header, 1817 .sk_rx_dst_set = inet_sk_rx_dst_set, 1818 .conn_request = tcp_v4_conn_request, 1819 .syn_recv_sock = tcp_v4_syn_recv_sock, 1820 .net_header_len = sizeof(struct iphdr), 1821 .setsockopt = ip_setsockopt, 1822 .getsockopt = ip_getsockopt, 1823 .addr2sockaddr = inet_csk_addr2sockaddr, 1824 .sockaddr_len = sizeof(struct sockaddr_in), 1825 #ifdef CONFIG_COMPAT 1826 .compat_setsockopt = compat_ip_setsockopt, 1827 .compat_getsockopt = compat_ip_getsockopt, 1828 #endif 1829 .mtu_reduced = tcp_v4_mtu_reduced, 1830 }; 1831 EXPORT_SYMBOL(ipv4_specific); 1832 1833 #ifdef CONFIG_TCP_MD5SIG 1834 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { 1835 .md5_lookup = tcp_v4_md5_lookup, 1836 .calc_md5_hash = tcp_v4_md5_hash_skb, 1837 .md5_parse = tcp_v4_parse_md5_keys, 1838 }; 1839 #endif 1840 1841 /* NOTE: A lot of things set to zero explicitly by call to 1842 * sk_alloc() so need not be done here. 1843 */ 1844 static int tcp_v4_init_sock(struct sock *sk) 1845 { 1846 struct inet_connection_sock *icsk = inet_csk(sk); 1847 1848 tcp_init_sock(sk); 1849 1850 icsk->icsk_af_ops = &ipv4_specific; 1851 1852 #ifdef CONFIG_TCP_MD5SIG 1853 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; 1854 #endif 1855 1856 return 0; 1857 } 1858 1859 void tcp_v4_destroy_sock(struct sock *sk) 1860 { 1861 struct tcp_sock *tp = tcp_sk(sk); 1862 1863 tcp_clear_xmit_timers(sk); 1864 1865 tcp_cleanup_congestion_control(sk); 1866 1867 tcp_cleanup_ulp(sk); 1868 1869 /* Cleanup up the write buffer. */ 1870 tcp_write_queue_purge(sk); 1871 1872 /* Check if we want to disable active TFO */ 1873 tcp_fastopen_active_disable_ofo_check(sk); 1874 1875 /* Cleans up our, hopefully empty, out_of_order_queue. 
*/ 1876 skb_rbtree_purge(&tp->out_of_order_queue); 1877 1878 #ifdef CONFIG_TCP_MD5SIG 1879 /* Clean up the MD5 key list, if any */ 1880 if (tp->md5sig_info) { 1881 tcp_clear_md5_list(sk); 1882 kfree_rcu(tp->md5sig_info, rcu); 1883 tp->md5sig_info = NULL; 1884 } 1885 #endif 1886 1887 /* Clean up a referenced TCP bind bucket. */ 1888 if (inet_csk(sk)->icsk_bind_hash) 1889 inet_put_port(sk); 1890 1891 BUG_ON(tp->fastopen_rsk); 1892 1893 /* If socket is aborted during connect operation */ 1894 tcp_free_fastopen_req(tp); 1895 tcp_saved_syn_free(tp); 1896 1897 sk_sockets_allocated_dec(sk); 1898 } 1899 EXPORT_SYMBOL(tcp_v4_destroy_sock); 1900 1901 #ifdef CONFIG_PROC_FS 1902 /* Proc filesystem TCP sock list dumping. */ 1903 1904 /* 1905 * Get next listener socket follow cur. If cur is NULL, get first socket 1906 * starting from bucket given in st->bucket; when st->bucket is zero the 1907 * very first socket in the hash table is returned. 1908 */ 1909 static void *listening_get_next(struct seq_file *seq, void *cur) 1910 { 1911 struct tcp_iter_state *st = seq->private; 1912 struct net *net = seq_file_net(seq); 1913 struct inet_listen_hashbucket *ilb; 1914 struct sock *sk = cur; 1915 1916 if (!sk) { 1917 get_head: 1918 ilb = &tcp_hashinfo.listening_hash[st->bucket]; 1919 spin_lock(&ilb->lock); 1920 sk = sk_head(&ilb->head); 1921 st->offset = 0; 1922 goto get_sk; 1923 } 1924 ilb = &tcp_hashinfo.listening_hash[st->bucket]; 1925 ++st->num; 1926 ++st->offset; 1927 1928 sk = sk_next(sk); 1929 get_sk: 1930 sk_for_each_from(sk) { 1931 if (!net_eq(sock_net(sk), net)) 1932 continue; 1933 if (sk->sk_family == st->family) 1934 return sk; 1935 } 1936 spin_unlock(&ilb->lock); 1937 st->offset = 0; 1938 if (++st->bucket < INET_LHTABLE_SIZE) 1939 goto get_head; 1940 return NULL; 1941 } 1942 1943 static void *listening_get_idx(struct seq_file *seq, loff_t *pos) 1944 { 1945 struct tcp_iter_state *st = seq->private; 1946 void *rc; 1947 1948 st->bucket = 0; 1949 st->offset = 0; 1950 rc = listening_get_next(seq, NULL); 1951 1952 while (rc && *pos) { 1953 rc = listening_get_next(seq, rc); 1954 --*pos; 1955 } 1956 return rc; 1957 } 1958 1959 static inline bool empty_bucket(const struct tcp_iter_state *st) 1960 { 1961 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain); 1962 } 1963 1964 /* 1965 * Get first established socket starting from bucket given in st->bucket. 1966 * If st->bucket is zero, the very first socket in the hash is returned. 
1967 */ 1968 static void *established_get_first(struct seq_file *seq) 1969 { 1970 struct tcp_iter_state *st = seq->private; 1971 struct net *net = seq_file_net(seq); 1972 void *rc = NULL; 1973 1974 st->offset = 0; 1975 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { 1976 struct sock *sk; 1977 struct hlist_nulls_node *node; 1978 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); 1979 1980 /* Lockless fast path for the common case of empty buckets */ 1981 if (empty_bucket(st)) 1982 continue; 1983 1984 spin_lock_bh(lock); 1985 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 1986 if (sk->sk_family != st->family || 1987 !net_eq(sock_net(sk), net)) { 1988 continue; 1989 } 1990 rc = sk; 1991 goto out; 1992 } 1993 spin_unlock_bh(lock); 1994 } 1995 out: 1996 return rc; 1997 } 1998 1999 static void *established_get_next(struct seq_file *seq, void *cur) 2000 { 2001 struct sock *sk = cur; 2002 struct hlist_nulls_node *node; 2003 struct tcp_iter_state *st = seq->private; 2004 struct net *net = seq_file_net(seq); 2005 2006 ++st->num; 2007 ++st->offset; 2008 2009 sk = sk_nulls_next(sk); 2010 2011 sk_nulls_for_each_from(sk, node) { 2012 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) 2013 return sk; 2014 } 2015 2016 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2017 ++st->bucket; 2018 return established_get_first(seq); 2019 } 2020 2021 static void *established_get_idx(struct seq_file *seq, loff_t pos) 2022 { 2023 struct tcp_iter_state *st = seq->private; 2024 void *rc; 2025 2026 st->bucket = 0; 2027 rc = established_get_first(seq); 2028 2029 while (rc && pos) { 2030 rc = established_get_next(seq, rc); 2031 --pos; 2032 } 2033 return rc; 2034 } 2035 2036 static void *tcp_get_idx(struct seq_file *seq, loff_t pos) 2037 { 2038 void *rc; 2039 struct tcp_iter_state *st = seq->private; 2040 2041 st->state = TCP_SEQ_STATE_LISTENING; 2042 rc = listening_get_idx(seq, &pos); 2043 2044 if (!rc) { 2045 st->state = TCP_SEQ_STATE_ESTABLISHED; 2046 rc = established_get_idx(seq, pos); 2047 } 2048 2049 return rc; 2050 } 2051 2052 static void *tcp_seek_last_pos(struct seq_file *seq) 2053 { 2054 struct tcp_iter_state *st = seq->private; 2055 int offset = st->offset; 2056 int orig_num = st->num; 2057 void *rc = NULL; 2058 2059 switch (st->state) { 2060 case TCP_SEQ_STATE_LISTENING: 2061 if (st->bucket >= INET_LHTABLE_SIZE) 2062 break; 2063 st->state = TCP_SEQ_STATE_LISTENING; 2064 rc = listening_get_next(seq, NULL); 2065 while (offset-- && rc) 2066 rc = listening_get_next(seq, rc); 2067 if (rc) 2068 break; 2069 st->bucket = 0; 2070 st->state = TCP_SEQ_STATE_ESTABLISHED; 2071 /* Fallthrough */ 2072 case TCP_SEQ_STATE_ESTABLISHED: 2073 if (st->bucket > tcp_hashinfo.ehash_mask) 2074 break; 2075 rc = established_get_first(seq); 2076 while (offset-- && rc) 2077 rc = established_get_next(seq, rc); 2078 } 2079 2080 st->num = orig_num; 2081 2082 return rc; 2083 } 2084 2085 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) 2086 { 2087 struct tcp_iter_state *st = seq->private; 2088 void *rc; 2089 2090 if (*pos && *pos == st->last_pos) { 2091 rc = tcp_seek_last_pos(seq); 2092 if (rc) 2093 goto out; 2094 } 2095 2096 st->state = TCP_SEQ_STATE_LISTENING; 2097 st->num = 0; 2098 st->bucket = 0; 2099 st->offset = 0; 2100 rc = *pos ? 

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start = tcp_seq_start;
	afinfo->seq_ops.next = tcp_seq_next;
	afinfo->seq_ops.stop = tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
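
/*
 * Each of the next three helpers formats one line of the dump:
 * get_openreq4() for connection requests still in SYN_RECV,
 * get_tcp4_sock() for full sockets and get_timewait4_sock() for
 * TIME_WAIT minisockets.  tcp4_seq_show() below picks the right one
 * based on sk->sk_state.
 */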

static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active = 2;
		timer_expires = sk->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
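
/*
 * The "tr" (timer) column written above and below uses small integer
 * codes: 0 = no timer pending, 1 = retransmit/loss-probe/RACK-reordering
 * timer, 2 = keepalive timer (sk_timer), 3 = TIME_WAIT timer (see
 * get_timewait4_sock()), 4 = zero-window probe timer.
 */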

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest = tw->tw_daddr;
	src = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner = THIS_MODULE,
	.open = tcp_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name = "tcp",
	.family = AF_INET,
	.seq_fops = &tcp_afinfo_seq_fops,
	.seq_ops = {
		.show = tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
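
/*
 * tcp_prot is the struct proto operations table through which the generic
 * socket layer (sk->sk_prot) reaches the TCP implementation for AF_INET
 * stream sockets; it is registered at boot from the IPv4 address-family
 * code in af_inet.c.  The IPv6 side provides its own table in
 * net/ipv6/tcp_ipv6.c.
 */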

struct proto tcp_prot = {
	.name = "TCP",
	.owner = THIS_MODULE,
	.close = tcp_close,
	.connect = tcp_v4_connect,
	.disconnect = tcp_disconnect,
	.accept = inet_csk_accept,
	.ioctl = tcp_ioctl,
	.init = tcp_v4_init_sock,
	.destroy = tcp_v4_destroy_sock,
	.shutdown = tcp_shutdown,
	.setsockopt = tcp_setsockopt,
	.getsockopt = tcp_getsockopt,
	.keepalive = tcp_set_keepalive,
	.recvmsg = tcp_recvmsg,
	.sendmsg = tcp_sendmsg,
	.sendpage = tcp_sendpage,
	.backlog_rcv = tcp_v4_do_rcv,
	.release_cb = tcp_release_cb,
	.hash = inet_hash,
	.unhash = inet_unhash,
	.get_port = inet_csk_get_port,
	.enter_memory_pressure = tcp_enter_memory_pressure,
	.leave_memory_pressure = tcp_leave_memory_pressure,
	.stream_memory_free = tcp_stream_memory_free,
	.sockets_allocated = &tcp_sockets_allocated,
	.orphan_count = &tcp_orphan_count,
	.memory_allocated = &tcp_memory_allocated,
	.memory_pressure = &tcp_memory_pressure,
	.sysctl_mem = sysctl_tcp_mem,
	.sysctl_wmem = sysctl_tcp_wmem,
	.sysctl_rmem = sysctl_tcp_rmem,
	.max_header = MAX_TCP_HEADER,
	.obj_size = sizeof(struct tcp_sock),
	.slab_flags = SLAB_TYPESAFE_BY_RCU,
	.twsk_prot = &tcp_timewait_sock_ops,
	.rsk_prot = &tcp_request_sock_ops,
	.h.hashinfo = &tcp_hashinfo,
	.no_autobind = true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_tcp_setsockopt,
	.compat_getsockopt = compat_tcp_getsockopt,
#endif
	.diag_destroy = tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init = tcp_sk_init,
	.exit = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}
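
/*
 * The per-namespace defaults assigned in tcp_sk_init() above are the
 * initial values of the tunables exposed under /proc/sys/net/ipv4/
 * (tcp_syn_retries, tcp_fin_timeout, tcp_tw_reuse and friends); the
 * sysctl table itself is registered separately in
 * net/ipv4/sysctl_net_ipv4.c.
 */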