xref: /openbmc/linux/net/ipv4/inet_diag.c (revision 3381df09)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * inet_diag.c	Module for monitoring INET transport protocols sockets.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/fcntl.h>
12 #include <linux/random.h>
13 #include <linux/slab.h>
14 #include <linux/cache.h>
15 #include <linux/init.h>
16 #include <linux/time.h>
17 
18 #include <net/icmp.h>
19 #include <net/tcp.h>
20 #include <net/ipv6.h>
21 #include <net/inet_common.h>
22 #include <net/inet_connection_sock.h>
23 #include <net/inet_hashtables.h>
24 #include <net/inet_timewait_sock.h>
25 #include <net/inet6_hashtables.h>
26 #include <net/bpf_sk_storage.h>
27 #include <net/netlink.h>
28 
29 #include <linux/inet.h>
30 #include <linux/stddef.h>
31 
32 #include <linux/inet_diag.h>
33 #include <linux/sock_diag.h>
34 
/* Per-protocol diag handler table, indexed by IPPROTO_* value.
 * Entries are populated on demand via sock_load_diag_module() (see
 * inet_diag_lock_handler()).
 */
static const struct inet_diag_handler **inet_diag_table;

/* Socket state snapshot consumed by the bytecode filter
 * inet_diag_bc_run(); built in inet_diag_bc_sk().
 */
struct inet_diag_entry {
	const __be32 *saddr;	/* local address words */
	const __be32 *daddr;	/* remote address words */
	u16 sport;		/* local port, host byte order */
	u16 dport;		/* remote port, host byte order */
	u16 family;
	u16 userlocks;		/* sk->sk_userlocks; 0 for non-full sockets */
	u32 ifindex;
	u32 mark;
};

/* Serializes use of inet_diag_table entries against handler changes. */
static DEFINE_MUTEX(inet_diag_table_mutex);
49 
/* Return the diag handler registered for @proto, or ERR_PTR(-ENOENT).
 * In BOTH cases inet_diag_table_mutex is held on return; the caller
 * must drop it via inet_diag_unlock_handler().
 */
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	/* Unlocked peek only decides whether to request a module load;
	 * the result is re-checked below under the mutex.
	 */
	if (!inet_diag_table[proto])
		sock_load_diag_module(AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}
61 
/* Drop the mutex taken by inet_diag_lock_handler().  @handler itself
 * is unused and may even be an ERR_PTR (see __inet_diag_dump()).
 */
static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}
66 
/* Fill the socket-identity part of @r (family, ports, bound device,
 * cookie and addresses) from @sk.  Usable for full, timewait and
 * request sockets alike: it only touches fields whose offsets are
 * shared across the three layouts (see twsk_build_assert()).
 */
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
	/* IPv4: zero the 16-byte address slots, then place the 4-byte
	 * addresses in the first word.
	 */
	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = sk->sk_rcv_saddr;
	r->id.idiag_dst[0] = sk->sk_daddr;
	}
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);
91 
/* Upper-bound estimate of the netlink payload needed to dump one full
 * socket; used to size the reply skb in inet_diag_dump_one_icsk().
 * The trailing +64 is slack for small attributes not itemized here.
 */
static size_t inet_sk_attr_size(struct sock *sk,
				const struct inet_diag_req_v2 *req,
				bool net_admin)
{
	const struct inet_diag_handler *handler;
	size_t aux = 0;

	/* NOTE(review): unlocked table read — assumes the handler stays
	 * registered for the duration of this request; confirm against
	 * the module unregister path.
	 */
	handler = inet_diag_table[req->sdiag_protocol];
	if (handler && handler->idiag_get_aux_size)
		aux = handler->idiag_get_aux_size(sk, net_admin);

	return	  nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ inet_diag_msg_attrs_size()
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ aux
		+ 64;
}
113 
/* Emit the generic per-socket attributes (shutdown, TOS/TCLASS,
 * V6ONLY, mark, class id) into @skb and fill uid/inode in @r.
 *
 * Returns 0 on success and 1 when the skb ran out of room — callers
 * only test the result for non-zero, hence the positive "error".
 */
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
			     struct inet_diag_msg *r, int ext,
			     struct user_namespace *user_ns,
			     bool net_admin)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;

		/* V6ONLY flag is only reported for LISTEN/CLOSE sockets. */
		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
			goto errout;
	}
#endif

	/* The socket mark is privileged information. */
	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
		goto errout;

	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
	    ext & (1 << (INET_DIAG_TCLASS - 1))) {
		u32 classid = 0;

#ifdef CONFIG_SOCK_CGROUP_DATA
		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
		/* Fallback to socket priority if class id isn't set.
		 * Classful qdiscs use it as direct reference to class.
		 * For cgroup2 classid is always zero.
		 */
		if (!classid)
			classid = sk->sk_priority;

		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
			goto errout;
	}

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	return 0;
errout:
	return 1;
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
173 
/* Cap for cb->min_dump_alloc growth: payload plus skb_shared_info must
 * still fit a single kmalloc allocation.
 */
#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Append one complete diag message describing the full socket @sk to
 * @skb.  @icsk is the connection-sock view of @sk, or NULL for
 * protocols without one (timer reporting is then skipped).
 * @nlmsg_flags is OR-ed into the netlink header (NLM_F_MULTI in dumps).
 *
 * Returns 0 on success, -EMSGSIZE when @skb is full (the partially
 * built message is cancelled).
 */
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct netlink_callback *cb,
		      const struct inet_diag_req_v2 *req,
		      u16 nlmsg_flags, bool net_admin)
{
	const struct tcp_congestion_ops *ca_ops;
	const struct inet_diag_handler *handler;
	struct inet_diag_dump_data *cb_data;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	struct nlattr *attr;
	void *info = NULL;

	cb_data = cb->data;
	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
				     sk_user_ns(NETLINK_CB(cb->skb).sk),
				     net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	/*
	 * RAW sockets might have user-defined protocols assigned,
	 * so report the one supplied on socket creation.
	 */
	if (sk->sk_type == SOCK_RAW) {
		if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))
			goto errout;
	}

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

	/* Report the currently pending timer and its remaining time in
	 * ms.  idiag_timer codes: 1 = retransmit-class timers,
	 * 4 = zero-window probe, 2 = sk_timer (keepalive for TCP —
	 * NOTE(review): confirm for other protocols), 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires =
			jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires =
			jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires =
			jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}

	/* Reserve space for the protocol's INET_DIAG_INFO payload now;
	 * it is filled by ->idiag_get_info() below.
	 */
	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 handler->idiag_info_size,
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if (ext & (1 << (INET_DIAG_CONG - 1))) {
		int err = 0;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops)
			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
		rcu_read_unlock();
		if (err < 0)
			goto errout;
	}

	handler->idiag_get_info(sk, r, info);

	if (ext & (1 << (INET_DIAG_INFO - 1)) && handler->idiag_get_aux)
		if (handler->idiag_get_aux(sk, net_admin, skb) < 0)
			goto errout;

	if (sk->sk_state < TCP_TIME_WAIT) {
		union tcp_cc_info info;	/* shadows the nlattr pointer above */
		size_t sz = 0;
		int attr;		/* attribute type chosen by the CA */

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ext, &attr, &info);
		rcu_read_unlock();
		if (sz && nla_put(skb, attr, sz, &info) < 0)
			goto errout;
	}

	/* Keep it at the end for potential retry with a larger skb,
	 * or else do best-effort fitting, which is only done for the
	 * first_nlmsg.
	 */
	if (cb_data->bpf_stg_diag) {
		bool first_nlmsg = ((unsigned char *)nlh == skb->data);
		unsigned int prev_min_dump_alloc;
		unsigned int total_nla_size = 0;
		unsigned int msg_len;
		int err;

		msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
		err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb,
					      INET_DIAG_SK_BPF_STORAGES,
					      &total_nla_size);

		if (!err)
			goto out;

		/* Put failed: raise min_dump_alloc so the next round
		 * gets a bigger skb, capped at MAX_DUMP_ALLOC_SIZE.
		 */
		total_nla_size += msg_len;
		prev_min_dump_alloc = cb->min_dump_alloc;
		if (total_nla_size > prev_min_dump_alloc)
			cb->min_dump_alloc = min_t(u32, total_nla_size,
						   MAX_DUMP_ALLOC_SIZE);

		if (!first_nlmsg)
			goto errout;

		if (cb->min_dump_alloc > prev_min_dump_alloc)
			/* Retry with pskb_expand_head() with
			 * __GFP_DIRECT_RECLAIM
			 */
			goto errout;

		WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc);

		/* Send what we have for this sk
		 * and move on to the next sk in the following
		 * dump()
		 */
	}

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
357 
/* Append a diag message for a TIME_WAIT socket.  Only the identity,
 * the recorded substate and the remaining timewait timer carry real
 * data; queue sizes, uid and inode are reported as zero.
 * Returns 0 or -EMSGSIZE.
 */
static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       u16 nlmsg_flags)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
			sizeof(*r), nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans      = 0;

	/* Report the substate recorded when the socket entered timewait. */
	r->idiag_state	      = tw->tw_substate;
	r->idiag_timer	      = 3;	/* timewait timer */
	tmo = tw->tw_timer.expires - jiffies;
	r->idiag_expires      = jiffies_delta_to_msecs(tmo);
	r->idiag_rqueue	      = 0;
	r->idiag_wqueue	      = 0;
	r->idiag_uid	      = 0;
	r->idiag_inode	      = 0;

	nlmsg_end(skb, nlh);
	return 0;
}
392 
/* Append a diag message for a request socket (TCP_NEW_SYN_RECV).
 * The state is reported as the classic TCP_SYN_RECV; the timer is the
 * SYN-ACK retransmit timer.  Returns 0 or -EMSGSIZE.
 */
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      u16 nlmsg_flags, bool net_admin)
{
	struct request_sock *reqsk = inet_reqsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = reqsk->num_retrans;

	/* inet_diag_msg_common_fill() read the cookie through the sock
	 * layout; prove that ir_cookie overlays sk_cookie.
	 */
	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = jiffies_delta_to_msecs(tmo);
	r->idiag_rqueue	= 0;
	r->idiag_wqueue	= 0;
	r->idiag_uid	= 0;
	r->idiag_inode	= 0;

	/* The mark is privileged information. */
	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
				     inet_rsk(reqsk)->ir_mark))
		return -EMSGSIZE;

	nlmsg_end(skb, nlh);
	return 0;
}
430 
431 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
432 			struct netlink_callback *cb,
433 			const struct inet_diag_req_v2 *r,
434 			u16 nlmsg_flags, bool net_admin)
435 {
436 	if (sk->sk_state == TCP_TIME_WAIT)
437 		return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);
438 
439 	if (sk->sk_state == TCP_NEW_SYN_RECV)
440 		return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
441 
442 	return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
443 				 net_admin);
444 }
445 
/* Find the single socket identified by @req->id.  idiag_src/idiag_sport
 * name the socket's local endpoint, idiag_dst/idiag_dport the remote
 * peer.  v4-mapped IPv6 requests are redirected to the IPv4 lookup
 * (the mapped IPv4 address lives in address word [3]).
 *
 * On success the socket is returned with a reference held; release it
 * with sock_gen_put().  Returns ERR_PTR(-EINVAL) for an unsupported
 * family, ERR_PTR(-ENOENT) when nothing matches or the cookie check
 * fails.
 */
struct sock *inet_diag_find_one_icsk(struct net *net,
				     struct inet_hashinfo *hashinfo,
				     const struct inet_diag_req_v2 *req)
{
	struct sock *sk;

	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
					 req->id.idiag_dport, req->id.idiag_src[3],
					 req->id.idiag_sport, req->id.idiag_if);
		else
			sk = inet6_lookup(net, hashinfo, NULL, 0,
					  (struct in6_addr *)req->id.idiag_dst,
					  req->id.idiag_dport,
					  (struct in6_addr *)req->id.idiag_src,
					  req->id.idiag_sport,
					  req->id.idiag_if);
	}
#endif
	else {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}
	rcu_read_unlock();
	if (!sk)
		return ERR_PTR(-ENOENT);

	/* Reject stale ids referring to a recycled socket slot. */
	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_gen_put(sk);
		return ERR_PTR(-ENOENT);
	}

	return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
489 
/* Answer an exact (non-dump) request for one connection socket: look
 * it up, build a single diag message in a freshly sized skb, and
 * unicast the reply back to the requester.
 */
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *req)
{
	struct sk_buff *in_skb = cb->skb;
	bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	sk = inet_diag_find_one_icsk(net, hashinfo, req);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	/* inet_sk_attr_size() over-estimates, so the fill below is not
	 * expected to run out of room (hence the WARN on -EMSGSIZE).
	 */
	rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	/* Drop the lookup reference (sk is never NULL on this path). */
	if (sk)
		sock_gen_put(sk);

	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
529 
/* Handle a non-dump ("exact") request.  SOCK_DIAG_BY_FAMILY reports a
 * single socket via the handler's ->dump_one(); SOCK_DESTROY forcibly
 * closes it when the handler implements ->destroy().  A throwaway
 * netlink_callback with empty dump data is synthesized so dump_one()
 * can share the dump-path helpers.
 */
static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
	} else if (cmd == SOCK_DIAG_BY_FAMILY) {
		struct inet_diag_dump_data empty_dump_data = {};
		struct netlink_callback cb = {
			.nlh = nlh,
			.skb = in_skb,
			.data = &empty_dump_data,
		};
		err = handler->dump_one(&cb, req);
	} else if (cmd == SOCK_DESTROY && handler->destroy) {
		err = handler->destroy(in_skb, req);
	} else {
		err = -EOPNOTSUPP;
	}
	inet_diag_unlock_handler(handler);

	return err;
}
557 
558 static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
559 {
560 	int words = bits >> 5;
561 
562 	bits &= 0x1f;
563 
564 	if (words) {
565 		if (memcmp(a1, a2, words << 2))
566 			return 0;
567 	}
568 	if (bits) {
569 		__be32 w1, w2;
570 		__be32 mask;
571 
572 		w1 = a1[words];
573 		w2 = a2[words];
574 
575 		mask = htonl((0xffffffff) << (32 - bits));
576 
577 		if ((w1 ^ w2) & mask)
578 			return 0;
579 	}
580 
581 	return 1;
582 }
583 
/* Interpret the filter bytecode @_bc against @entry.
 *
 * The program is a sequence of inet_diag_bc_op; each op evaluates one
 * predicate into "yes" and execution advances by op->yes bytes on a
 * match, op->no bytes otherwise.  The socket is accepted iff the walk
 * lands exactly on the end of the blob (len == 0); overshooting
 * (len < 0) rejects it.  inet_diag_bc_audit() has already guaranteed
 * the walk terminates and stays in bounds.
 */
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		/* Port comparisons: the operand port is stored in the
		 * "no" field of the following op slot (op[1]).
		 */
		case INET_DIAG_BC_S_EQ:
			yes = entry->sport == op[1].no;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_EQ:
			yes = entry->dport == op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			const struct inet_diag_hostcond *cond;
			const __be32 *addr;

			cond = (const struct inet_diag_hostcond *)(op + 1);
			/* -1 means "any port". */
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					     entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			/* Family mismatch: still allow an AF_INET
			 * condition to match a v4-mapped IPv6 address
			 * (::ffff:a.b.c.d, IPv4 part in word [3]).
			 */
			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			/* Zero prefix length matches any address. */
			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		case INET_DIAG_BC_DEV_COND: {
			u32 ifindex;

			ifindex = *((const u32 *)(op + 1));
			if (ifindex != entry->ifindex)
				yes = 0;
			break;
		}
		case INET_DIAG_BC_MARK_COND: {
			struct inet_diag_markcond *cond;

			cond = (struct inet_diag_markcond *)(op + 1);
			if ((entry->mark & cond->mask) != cond->mark)
				yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}
690 
/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV):
 * it only reads address fields whose offsets are shared across the
 * socket layouts (see twsk_build_assert()).
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
			     const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry->daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry->saddr = &sk->sk_rcv_saddr;
		entry->daddr = &sk->sk_daddr;
	}
}
707 
/* Build an inet_diag_entry snapshot of @sk and run the filter @bc
 * against it.  A NULL @bc matches everything.  For non-full sockets
 * userlocks is treated as 0 and the mark comes from the request sock
 * (NEW_SYN_RECV) or is 0 (timewait).  Returns 1 on match, 0 otherwise.
 */
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;

	if (!bc)
		return 1;

	entry.family = sk->sk_family;
	entry_fill_addrs(&entry, sk);
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.ifindex = sk->sk_bound_dev_if;
	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
	if (sk_fullsock(sk))
		entry.mark = sk->sk_mark;
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
	else
		entry.mark = 0;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
732 
/* Check that a jump target lies on an op boundary.
 *
 * @bc/@len: the full bytecode blob and its total length.
 * @cc: the target's distance from the END of the blob (the caller
 *      passes len - op->no).
 * Walk the "yes" chain from the start; the target is valid iff the
 * walk reaches it exactly (cc == remaining len), with every hop at
 * least one op long and 4-byte aligned.  Returns 1 if valid.
 */
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}
749 
750 /* data is u32 ifindex */
751 static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
752 			  int *min_len)
753 {
754 	/* Check ifindex space. */
755 	*min_len += sizeof(u32);
756 	if (len < *min_len)
757 		return false;
758 
759 	return true;
760 }
761 /* Validate an inet_diag_hostcond. */
762 static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
763 			   int *min_len)
764 {
765 	struct inet_diag_hostcond *cond;
766 	int addr_len;
767 
768 	/* Check hostcond space. */
769 	*min_len += sizeof(struct inet_diag_hostcond);
770 	if (len < *min_len)
771 		return false;
772 	cond = (struct inet_diag_hostcond *)(op + 1);
773 
774 	/* Check address family and address length. */
775 	switch (cond->family) {
776 	case AF_UNSPEC:
777 		addr_len = 0;
778 		break;
779 	case AF_INET:
780 		addr_len = sizeof(struct in_addr);
781 		break;
782 	case AF_INET6:
783 		addr_len = sizeof(struct in6_addr);
784 		break;
785 	default:
786 		return false;
787 	}
788 	*min_len += addr_len;
789 	if (len < *min_len)
790 		return false;
791 
792 	/* Check prefix length (in bits) vs address length (in bytes). */
793 	if (cond->prefix_len > 8 * addr_len)
794 		return false;
795 
796 	return true;
797 }
798 
799 /* Validate a port comparison operator. */
800 static bool valid_port_comparison(const struct inet_diag_bc_op *op,
801 				  int len, int *min_len)
802 {
803 	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
804 	*min_len += sizeof(struct inet_diag_bc_op);
805 	if (len < *min_len)
806 		return false;
807 	return true;
808 }
809 
810 static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
811 			   int *min_len)
812 {
813 	*min_len += sizeof(struct inet_diag_markcond);
814 	return len >= *min_len;
815 }
816 
/* Validate user-supplied filter bytecode before it may ever run.
 *
 * Per op: the opcode must be known, enough trailing payload must be
 * present for its condition (the valid_* helpers bump min_len
 * accordingly), jumps must be 4-byte aligned, forward-only, at least
 * one op long and reach at most one step past the end, and "no"
 * targets must land on op boundaries (valid_cc()).  MARK conditions
 * require CAP_NET_ADMIN.  Returns 0 when acceptable, else -EINVAL or
 * -EPERM.
 */
static int inet_diag_bc_audit(const struct nlattr *attr,
			      const struct sk_buff *skb)
{
	bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
	const void *bytecode, *bc;
	int bytecode_len, len;

	if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
		return -EINVAL;

	bytecode = bc = nla_data(attr);
	len = bytecode_len = nla_len(attr);

	while (len > 0) {
		int min_len = sizeof(struct inet_diag_bc_op);
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_DEV_COND:
			if (!valid_devcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_EQ:
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_EQ:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_MARK_COND:
			if (!net_admin)
				return -EPERM;
			if (!valid_markcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			/* len + 4 lets a final condition reject by
			 * jumping one op slot past the end.
			 */
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}
882 
/* Compile-time layout checks: inet_diag_msg_common_fill() and
 * inet_diag_bc_sk() read timewait sockets through sock/inet_sock field
 * names, which is only safe while the corresponding
 * inet_timewait_sock fields sit at identical offsets.
 */
static void twsk_build_assert(void)
{
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
		     offsetof(struct sock, sk_family));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
		     offsetof(struct inet_sock, inet_num));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
		     offsetof(struct inet_sock, inet_dport));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
		     offsetof(struct inet_sock, inet_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
		     offsetof(struct inet_sock, inet_daddr));

#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
		     offsetof(struct sock, sk_v6_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
		     offsetof(struct sock, sk_v6_daddr));
#endif
}
908 
/* Dump all sockets of @hashinfo matching @r into @skb.
 *
 * Two resumable phases, tracked in cb->args:
 *   args[0] == 0: walk the listening hash (only when TCPF_LISTEN is
 *                 requested and no remote-port filter is set);
 *   args[0] == 1: walk the established hash (all other states).
 * args[1] holds the bucket index and args[2] the in-bucket position
 * to resume from on the next dump() call.
 */
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
			 struct netlink_callback *cb,
			 const struct inet_diag_req_v2 *r)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct inet_diag_dump_data *cb_data = cb->data;
	struct net *net = sock_net(skb->sk);
	u32 idiag_states = r->idiag_states;
	int i, num, s_i, s_num;
	struct nlattr *bc;
	struct sock *sk;

	bc = cb_data->inet_diag_nla_bc;
	/* Asking for SYN_RECV implicitly includes the modern
	 * NEW_SYN_RECV request sockets.
	 */
	if (idiag_states & TCPF_SYN_RECV)
		idiag_states |= TCPF_NEW_SYN_RECV;
	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport)
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct inet_listen_hashbucket *ilb;
			struct hlist_nulls_node *node;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->nulls_head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				/* Skip entries already emitted in a
				 * previous round.
				 */
				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!inet_diag_bc_sk(bc, sk))
					goto next_listen;

				/* skb full: record resume point and stop. */
				if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
						      cb, r, NLM_F_MULTI,
						      net_admin) < 0) {
					spin_unlock(&ilb->lock);
					goto done;
				}

next_listen:
				++num;
			}
			spin_unlock(&ilb->lock);

			s_num = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	/* Only LISTEN was requested: nothing left to do. */
	if (!(idiag_states & ~TCPF_LISTEN))
		goto out;

/* Batch size: candidate sockets are collected (with a reference) under
 * the bucket lock, then filled into the skb after the lock is dropped.
 */
#define SKARR_SZ 16
	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct hlist_nulls_node *node;
		struct sock *sk_arr[SKARR_SZ];
		int num_arr[SKARR_SZ];
		int idx, accum, res;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

next_chunk:
		num = 0;
		accum = 0;
		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int state;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			/* Match timewait sockets against the substate
			 * recorded when they entered timewait.
			 */
			state = (sk->sk_state == TCP_TIME_WAIT) ?
				inet_twsk(sk)->tw_substate : sk->sk_state;
			if (!(idiag_states & (1 << state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			twsk_build_assert();

			if (!inet_diag_bc_sk(bc, sk))
				goto next_normal;

			/* Refcount already zero: socket is going away. */
			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				goto next_normal;

			num_arr[accum] = num;
			sk_arr[accum] = sk;
			if (++accum == SKARR_SZ)
				break;
next_normal:
			++num;
		}
		spin_unlock_bh(lock);
		res = 0;
		for (idx = 0; idx < accum; idx++) {
			if (res >= 0) {
				res = sk_diag_fill(sk_arr[idx], skb, cb, r,
						   NLM_F_MULTI, net_admin);
				/* On failure remember where to resume,
				 * but still drop the remaining refs.
				 */
				if (res < 0)
					num = num_arr[idx];
			}
			sock_gen_put(sk_arr[idx]);
		}
		if (res < 0)
			break;
		cond_resched();
		if (accum == SKARR_SZ) {
			s_num = num + 1;
			goto next_chunk;
		}
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
1063 
/* Common dump entry: run the protocol handler's ->dump().  If nothing
 * at all fit in @skb and inet_sk_diag_fill() raised cb->min_dump_alloc,
 * grow the skb head and retry (once per growth step).
 */
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *r)
{
	const struct inet_diag_handler *handler;
	u32 prev_min_dump_alloc;
	int err = 0;

again:
	prev_min_dump_alloc = cb->min_dump_alloc;
	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	/* The skb is not large enough to fit one sk info and
	 * inet_sk_diag_fill() has requested for a larger skb.
	 */
	if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) {
		err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL);
		if (!err)
			goto again;
	}

	return err ? : skb->len;
}
1091 
1092 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
1093 {
1094 	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh));
1095 }
1096 
/* Prepare per-dump state in cb->data: allocate the dump data, index
 * the request's netlink attributes by type, audit the filter bytecode
 * (if present) and set up the bpf sk_storage dumper (if requested).
 * @hdrlen distinguishes the v2 and compat request layouts.  The state
 * is released by inet_diag_dump_done().
 */
static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct inet_diag_dump_data *cb_data;
	struct sk_buff *skb = cb->skb;
	struct nlattr *nla;
	int rem, err;

	cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL);
	if (!cb_data)
		return -ENOMEM;

	/* Index attributes for O(1) lookup; out-of-range types are
	 * silently ignored.
	 */
	nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen),
			  nlmsg_attrlen(nlh, hdrlen), rem) {
		int type = nla_type(nla);

		if (type < __INET_DIAG_REQ_MAX)
			cb_data->req_nlas[type] = nla;
	}

	/* Reject malformed filter programs before the dump starts. */
	nla = cb_data->inet_diag_nla_bc;
	if (nla) {
		err = inet_diag_bc_audit(nla, skb);
		if (err) {
			kfree(cb_data);
			return err;
		}
	}

	nla = cb_data->inet_diag_nla_bpf_stgs;
	if (nla) {
		struct bpf_sk_storage_diag *bpf_stg_diag;

		bpf_stg_diag = bpf_sk_storage_diag_alloc(nla);
		if (IS_ERR(bpf_stg_diag)) {
			kfree(cb_data);
			return PTR_ERR(bpf_stg_diag);
		}
		cb_data->bpf_stg_diag = bpf_stg_diag;
	}

	cb->data = cb_data;
	return 0;
}
1141 
1142 static int inet_diag_dump_start(struct netlink_callback *cb)
1143 {
1144 	return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2));
1145 }
1146 
1147 static int inet_diag_dump_start_compat(struct netlink_callback *cb)
1148 {
1149 	return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req));
1150 }
1151 
1152 static int inet_diag_dump_done(struct netlink_callback *cb)
1153 {
1154 	struct inet_diag_dump_data *cb_data = cb->data;
1155 
1156 	bpf_sk_storage_diag_free(cb_data->bpf_stg_diag);
1157 	kfree(cb->data);
1158 
1159 	return 0;
1160 }
1161 
1162 static int inet_diag_type2proto(int type)
1163 {
1164 	switch (type) {
1165 	case TCPDIAG_GETSOCK:
1166 		return IPPROTO_TCP;
1167 	case DCCPDIAG_GETSOCK:
1168 		return IPPROTO_DCCP;
1169 	default:
1170 		return 0;
1171 	}
1172 }
1173 
1174 static int inet_diag_dump_compat(struct sk_buff *skb,
1175 				 struct netlink_callback *cb)
1176 {
1177 	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
1178 	struct inet_diag_req_v2 req;
1179 
1180 	req.sdiag_family = AF_UNSPEC; /* compatibility */
1181 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
1182 	req.idiag_ext = rc->idiag_ext;
1183 	req.idiag_states = rc->idiag_states;
1184 	req.id = rc->id;
1185 
1186 	return __inet_diag_dump(skb, cb, &req);
1187 }
1188 
1189 static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
1190 				      const struct nlmsghdr *nlh)
1191 {
1192 	struct inet_diag_req *rc = nlmsg_data(nlh);
1193 	struct inet_diag_req_v2 req;
1194 
1195 	req.sdiag_family = rc->idiag_family;
1196 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
1197 	req.idiag_ext = rc->idiag_ext;
1198 	req.idiag_states = rc->idiag_states;
1199 	req.id = rc->id;
1200 
1201 	return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
1202 }
1203 
1204 static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
1205 {
1206 	int hdrlen = sizeof(struct inet_diag_req);
1207 	struct net *net = sock_net(skb->sk);
1208 
1209 	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
1210 	    nlmsg_len(nlh) < hdrlen)
1211 		return -EINVAL;
1212 
1213 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1214 		struct netlink_dump_control c = {
1215 			.start = inet_diag_dump_start_compat,
1216 			.done = inet_diag_dump_done,
1217 			.dump = inet_diag_dump_compat,
1218 		};
1219 		return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
1220 	}
1221 
1222 	return inet_diag_get_exact_compat(skb, nlh);
1223 }
1224 
1225 static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
1226 {
1227 	int hdrlen = sizeof(struct inet_diag_req_v2);
1228 	struct net *net = sock_net(skb->sk);
1229 
1230 	if (nlmsg_len(h) < hdrlen)
1231 		return -EINVAL;
1232 
1233 	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
1234 	    h->nlmsg_flags & NLM_F_DUMP) {
1235 		struct netlink_dump_control c = {
1236 			.start = inet_diag_dump_start,
1237 			.done = inet_diag_dump_done,
1238 			.dump = inet_diag_dump,
1239 		};
1240 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
1241 	}
1242 
1243 	return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
1244 }
1245 
/* sock_diag ->get_info(): append one SOCK_DIAG_BY_FAMILY message for @sk
 * to @skb, consisting of the common inet_diag_msg header, the protocol
 * number, and the per-protocol INET_DIAG_INFO payload.
 *
 * Returns 0 on success or a negative errno; on any failure the
 * partially built message is cancelled so @skb is left unchanged.
 */
static
int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
{
	const struct inet_diag_handler *handler;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	struct inet_diag_msg *r;
	void *info = NULL;
	int err = 0;

	nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
	if (!nlh)
		return -ENOMEM;

	r = nlmsg_data(nlh);
	memset(r, 0, sizeof(*r));
	inet_diag_msg_common_fill(r, sk);
	/* Only DGRAM/STREAM sockets are inet sockets with a valid
	 * inet_sport; raw sockets keep what common_fill wrote.
	 */
	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
		r->id.idiag_sport = inet_sk(sk)->inet_sport;
	r->idiag_state = sk->sk_state;

	if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	/* Takes inet_diag_table_mutex; must be released on every path. */
	handler = inet_diag_lock_handler(sk->sk_protocol);
	if (IS_ERR(handler)) {
		inet_diag_unlock_handler(handler);
		nlmsg_cancel(skb, nlh);
		return PTR_ERR(handler);
	}

	/* Reserve space for the handler's info blob only if it declares a
	 * size; handlers with idiag_info_size == 0 get info == NULL.
	 */
	attr = handler->idiag_info_size
		? nla_reserve_64bit(skb, INET_DIAG_INFO,
				    handler->idiag_info_size,
				    INET_DIAG_PAD)
		: NULL;
	/* NOTE(review): a failed nla_reserve_64bit() also yields
	 * info == NULL here; ->idiag_get_info() is expected to cope.
	 */
	if (attr)
		info = nla_data(attr);

	handler->idiag_get_info(sk, r, info);
	inet_diag_unlock_handler(handler);

	nlmsg_end(skb, nlh);
	return 0;
}
1293 
/* sock_diag registration for AF_INET: dump and destroy share one
 * command handler; get_info serves per-socket info queries.
 */
static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_cmd,
	.get_info = inet_diag_handler_get_info,
	.destroy = inet_diag_handler_cmd,
};
1300 
/* sock_diag registration for AF_INET6: same callbacks as AF_INET; the
 * per-protocol handlers distinguish families via the request itself.
 */
static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_cmd,
	.get_info = inet_diag_handler_get_info,
	.destroy = inet_diag_handler_cmd,
};
1307 
1308 int inet_diag_register(const struct inet_diag_handler *h)
1309 {
1310 	const __u16 type = h->idiag_type;
1311 	int err = -EINVAL;
1312 
1313 	if (type >= IPPROTO_MAX)
1314 		goto out;
1315 
1316 	mutex_lock(&inet_diag_table_mutex);
1317 	err = -EEXIST;
1318 	if (!inet_diag_table[type]) {
1319 		inet_diag_table[type] = h;
1320 		err = 0;
1321 	}
1322 	mutex_unlock(&inet_diag_table_mutex);
1323 out:
1324 	return err;
1325 }
1326 EXPORT_SYMBOL_GPL(inet_diag_register);
1327 
1328 void inet_diag_unregister(const struct inet_diag_handler *h)
1329 {
1330 	const __u16 type = h->idiag_type;
1331 
1332 	if (type >= IPPROTO_MAX)
1333 		return;
1334 
1335 	mutex_lock(&inet_diag_table_mutex);
1336 	inet_diag_table[type] = NULL;
1337 	mutex_unlock(&inet_diag_table_mutex);
1338 }
1339 EXPORT_SYMBOL_GPL(inet_diag_unregister);
1340 
1341 static int __init inet_diag_init(void)
1342 {
1343 	const int inet_diag_table_size = (IPPROTO_MAX *
1344 					  sizeof(struct inet_diag_handler *));
1345 	int err = -ENOMEM;
1346 
1347 	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
1348 	if (!inet_diag_table)
1349 		goto out;
1350 
1351 	err = sock_diag_register(&inet_diag_handler);
1352 	if (err)
1353 		goto out_free_nl;
1354 
1355 	err = sock_diag_register(&inet6_diag_handler);
1356 	if (err)
1357 		goto out_free_inet;
1358 
1359 	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
1360 out:
1361 	return err;
1362 
1363 out_free_inet:
1364 	sock_diag_unregister(&inet_diag_handler);
1365 out_free_nl:
1366 	kfree(inet_diag_table);
1367 	goto out;
1368 }
1369 
/* Module exit: tear down in reverse order of inet_diag_init() so no new
 * requests can arrive once the handler table is freed.
 */
static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}
1377 
1378 module_init(inet_diag_init);
1379 module_exit(inet_diag_exit);
1380 MODULE_LICENSE("GPL");
1381 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
1382 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
1383