#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
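/* Note: dst metrics keep RTT/RTTVAR in milliseconds while this cache keeps
 * them in microseconds, hence the USEC_PER_MSEC scaling above.  The lock
 * bits mirror per-route locked metrics; for illustration (iproute2 syntax,
 * assuming a typical setup):
 *
 *	ip route add 10.1.0.0/16 via 10.0.0.1 rtt lock 300
 *
 * pins RTAX_RTT, so tcp_metric_locked(tm, TCP_METRIC_RTT) is true and the
 * cached RTT is never overwritten by live samples.
 */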
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}
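/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries without a
 * match returns TCP_METRICS_RECLAIM_PTR (the sentinel 0x1UL cast to a block
 * pointer, never a valid address) rather than NULL: "not found, and the
 * chain is full enough that tcpm_new() should recycle its oldest entry
 * instead of growing it".
 */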
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
		hash = ipv4_addr_hash(tw->tw_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
			hash = ipv4_addr_hash(tw->tw_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
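/* Note that v4-mapped IPv6 sockets are keyed by their IPv4 form above, so a
 * plain IPv4 connection and a v4-mapped IPv6 connection to the same peer
 * share one metrics entry.
 */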
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is nonsense, and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
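/* Worked example of the RTT merge above (illustrative numbers): with a
 * cached RTT of 100000 us and a session srtt of 60000 us, m = 40000, so the
 * cache only decays by m/8 = 5000 us (to 95000 us); a session srtt larger
 * than the cache would have replaced it outright.  Overestimating RTT merely
 * delays an RTO, while underestimating it risks spurious retransmissions.
 */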
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during the
		 * 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small.  Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()).  Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory.  RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed.  In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is correct too.  The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT.  BUT!
	 * If the peer tries clever tricks, sort of "quick acks" for long
	 * enough to drive the RTT down to a low value, and then abruptly
	 * stops doing so and starts to delay ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * the 3WHS.  This is most likely due to retransmission,
		 * including a spurious one.  Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
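/* Worked example of the RTO seeding above, assuming HZ=1000 and the default
 * 200 ms tcp_rto_min: a cached 200 ms RTT is stored as 200000 us scaled by
 * 8 (1600000), so crtt /= 8 * USEC_PER_SEC / HZ yields 200 jiffies, and the
 * seeded RTO becomes 200 + max(400, 200) = 600 ms -- deliberately more
 * conservative than what a live tcp_rtt_estimator() run would produce.
 */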
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
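/* In the paws_check branch above, the peer is rejected only when we hold a
 * fresh timestamp (seen within TCP_PAWS_MSL seconds) and the cached tsval
 * runs ahead of the request's by more than TCP_PAWS_WINDOW, or timestamps
 * are not in use at all -- i.e. the SYN looks like an old duplicate from a
 * previous incarnation of the connection.
 */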
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}
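/* The Fast Open fields are read on the connect() path without taking
 * tcp_metrics_lock; the seqlock below lets those readers simply retry if
 * they race with tcp_fastopen_cache_set() updating mss/cookie/loss state.
 */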
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]			= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]		= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]		= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]			= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]		= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]		= { .type = NLA_BINARY,
						    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
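/* These genetlink commands back the "ip tcp_metrics" tool from iproute2;
 * e.g. "ip tcp_metrics show" triggers a TCP_METRICS_CMD_GET dump, and a
 * TCP_METRICS_CMD_DEL carrying no address attribute flushes every entry in
 * the namespace (see tcp_metrics_nl_cmd_del() below).
 */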
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss,
				   TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
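/* In tcp_metrics_nl_dump() above, dump state is carried in cb->args:
 * args[0] holds the next hash row and args[1] the position reached within
 * its chain, so a dump that fills the skb resumes exactly where it stopped
 * on the next netlink read.
 */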
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
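/* Both the flush above and the targeted delete below follow the usual RCU
 * removal pattern: unlink under tcp_metrics_lock, then free via kfree_rcu()
 * so lockless readers still traversing the chain never touch freed memory.
 */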
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!tcp_metrics_hash)
		tcp_metrics_hash = vzalloc(size);

	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}