/* net/ipv4/tcp_metrics.c (xref revision b34e08d5) */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

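/* One cache entry per (source, destination) address pair.  Entries hang
 * off a hash bucket on an RCU-protected singly linked list and carry the
 * cached routing metrics plus TCP Fast Open state for the peer.
 */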
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

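/* Refresh a metrics block from the routing metrics cached in @dst and
 * note which metrics the route has locked.  The timestamp state is reset,
 * and when @fastopen_clear is set the Fast Open state is wiped as well.
 */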
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

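/* Create a metrics block for @saddr/@daddr, or reclaim the oldest block
 * on the bucket chain when the lookup indicated the chain is already more
 * than TCP_METRICS_RECLAIM_DEPTH entries deep.  Runs under
 * tcp_metrics_lock.
 */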
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spinlock the cache might have been populated
	 * with this entry, so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

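/* Encode the result of a chain walk: a hit returns the block itself, a
 * miss on an overlong chain returns TCP_METRICS_RECLAIM_PTR so the caller
 * reclaims the oldest entry instead of growing the chain, and a miss on a
 * short chain returns NULL.
 */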
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

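/* Look up the metrics block for a request socket, keyed by the address
 * pair stored in the request.  Also refreshes a stale entry from @dst.
 */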
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
		daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
		hash = (__force unsigned int) daddr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
		*(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

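/* Look up the metrics block for a timewait socket.  V4-mapped IPv6
 * destinations are collapsed onto their IPv4 entry.
 */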
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = tw->tw_rcv_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = tw->tw_rcv_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = tw->tw_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			*(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
			daddr.family = AF_INET6;
			*(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr))
			break;
	}
	return tm;
}

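/* Main lookup helper for full sockets: map the socket's address pair to
 * a metrics block, optionally creating one when @create is set.  Callers
 * hold rcu_read_lock().
 */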
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = inet_sk(sk)->inet_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = inet_sk(sk)->inet_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			*(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
			daddr.family = AF_INET6;
			*(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one. Otherwise, use EWMA. Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Slow start did not finish, so cwnd is nonsense and
		 * ssthresh may be invalid as well.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize this socket's congestion and RTT state from any metrics
 * cached for its destination.
 */
void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * the 3WHS. Restore it to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3 secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation is correct there too. The algorithm is adaptive and,
	 * provided we follow the specs, it NEVER underestimates RTT. BUT!
	 * If the peer plays clever tricks, sort of "quick acks" for long
	 * enough to drive RTT down to a low value, and then abruptly stops
	 * doing so and starts delaying ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_MSEC;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298 5.7: we've failed to get a valid RTT sample from
		 * the 3WHS. This is most likely due to retransmission,
		 * including spurious ones. Reset the RTO back to 3 secs
		 * from the more aggressive 1 sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if the SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298's more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than one SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

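/* Decide whether to trust this peer.  With @paws_check the request is
 * rejected only when a recent cached timestamp contradicts the one in
 * the request; without it the peer counts as proven only by an existing
 * RTT sample together with saved timestamp state.
 */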
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

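/* Seed the socket's ts_recent from the timestamp remembered for this
 * destination, provided it is still within the PAWS window.
 */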
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

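/* Timewait counterpart of tcp_remember_stamp(): refresh the cached
 * timestamp from a timewait socket's saved receive state.
 */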
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts	   = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

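/* Read the cached Fast Open state (MSS, cookie, SYN-loss history) for
 * this destination.  The seqlock retry loop gives readers a consistent
 * snapshot without blocking the writer.
 */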
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

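/* Userspace inspects and flushes this cache over the generic netlink
 * family above; iproute2's "ip tcp_metrics show" and "ip tcp_metrics
 * flush" are typical clients (naming per iproute2; the kernel side only
 * defines the TCP_METRICS_CMD_GET/DEL commands registered below).
 */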
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes; the caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				tm->tcpm_daddr.addr.a4) < 0)
			goto nla_put_failure;
		if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				tm->tcpm_saddr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_daddr.addr.a6) < 0)
			goto nla_put_failure;
		if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
			    tm->tcpm_saddr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

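/* Dump the whole cache in chunks.  cb->args[0]/[1] record the hash row
 * and chain position where the previous chunk stopped so the walk can
 * resume there on the next callback.
 */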
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

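/* Parse a v4 or v6 address attribute into @addr and optionally derive the
 * hash seed.  Returns 0 on success, 1 when the attribute is absent and
 * @optional is set, and a negative errno otherwise.
 */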
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		if (hash)
			*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		if (hash)
			*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

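/* Flush the whole cache: detach each bucket chain under tcp_metrics_lock,
 * then free the detached blocks after an RCU grace period via kfree_rcu().
 */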
static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

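/* "tcpmhash_entries=N" on the kernel command line overrides the
 * memory-based sizing of the per-namespace hash table below.
 */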
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

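/* Size the per-namespace hash by available memory (unless overridden),
 * round up to a power of two, and fall back to vzalloc() when a large
 * table cannot be allocated contiguously.
 */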
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

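/* Module init: register the per-namespace hash tables first, then the
 * generic netlink family; unwind the pernet registration if the family
 * fails to register.
 */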
void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}