1 /* Connection tracking via netlink socket. Allows for user space
2  * protocol helpers and general trouble making from userspace.
3  *
4  * (C) 2001 by Jay Schulist <jschlst@samba.org>
5  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick McHardy <kaber@trash.net>
7  * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8  *
9  * Initial connection tracking via netlink development funded and
10  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11  *
12  * Further development of this code funded by Astaro AG (http://www.astaro.com)
13  *
14  * This software may be used and distributed according to the terms
15  * of the GNU General Public License, incorporated herein by reference.
16  */
17 
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32 #include <linux/siphash.h>
33 
34 #include <linux/netfilter.h>
35 #include <net/netlink.h>
36 #include <net/sock.h>
37 #include <net/netfilter/nf_conntrack.h>
38 #include <net/netfilter/nf_conntrack_core.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_seqadj.h>
42 #include <net/netfilter/nf_conntrack_l4proto.h>
43 #include <net/netfilter/nf_conntrack_tuple.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_labels.h>
48 #include <net/netfilter/nf_conntrack_synproxy.h>
49 #if IS_ENABLED(CONFIG_NF_NAT)
50 #include <net/netfilter/nf_nat.h>
51 #include <net/netfilter/nf_nat_helper.h>
52 #endif
53 
54 #include <linux/netfilter/nfnetlink.h>
55 #include <linux/netfilter/nfnetlink_conntrack.h>
56 
57 #include "nf_internals.h"
58 
59 MODULE_LICENSE("GPL");
60 
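/* Dump the CTA_TUPLE_PROTO nest: the protocol number plus any
 * per-protocol attributes supplied by the l4proto's tuple_to_nlattr()
 * callback.
 */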
61 static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
62 				const struct nf_conntrack_tuple *tuple,
63 				const struct nf_conntrack_l4proto *l4proto)
64 {
65 	int ret = 0;
66 	struct nlattr *nest_parms;
67 
68 	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
69 	if (!nest_parms)
70 		goto nla_put_failure;
71 	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
72 		goto nla_put_failure;
73 
74 	if (likely(l4proto->tuple_to_nlattr))
75 		ret = l4proto->tuple_to_nlattr(skb, tuple);
76 
77 	nla_nest_end(skb, nest_parms);
78 
79 	return ret;
80 
81 nla_put_failure:
82 	return -1;
83 }
84 
85 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
86 				const struct nf_conntrack_tuple *tuple)
87 {
88 	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
89 	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
90 		return -EMSGSIZE;
91 	return 0;
92 }
93 
94 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
95 				const struct nf_conntrack_tuple *tuple)
96 {
97 	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
98 	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
99 		return -EMSGSIZE;
100 	return 0;
101 }
102 
103 static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
104 				    const struct nf_conntrack_tuple *tuple)
105 {
106 	int ret = 0;
107 	struct nlattr *nest_parms;
108 
109 	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
110 	if (!nest_parms)
111 		goto nla_put_failure;
112 
113 	switch (tuple->src.l3num) {
114 	case NFPROTO_IPV4:
115 		ret = ipv4_tuple_to_nlattr(skb, tuple);
116 		break;
117 	case NFPROTO_IPV6:
118 		ret = ipv6_tuple_to_nlattr(skb, tuple);
119 		break;
120 	}
121 
122 	nla_nest_end(skb, nest_parms);
123 
124 	return ret;
125 
126 nla_put_failure:
127 	return -1;
128 }
129 
130 static int ctnetlink_dump_tuples(struct sk_buff *skb,
131 				 const struct nf_conntrack_tuple *tuple)
132 {
133 	const struct nf_conntrack_l4proto *l4proto;
134 	int ret;
135 
136 	rcu_read_lock();
137 	ret = ctnetlink_dump_tuples_ip(skb, tuple);
138 
139 	if (ret >= 0) {
140 		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
141 		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
142 	}
143 	rcu_read_unlock();
144 	return ret;
145 }
146 
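/* Emit a zone attribute only when the zone is non-default and is tied to
 * the requested direction.
 */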
147 static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
148 				  const struct nf_conntrack_zone *zone, int dir)
149 {
150 	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
151 		return 0;
152 	if (nla_put_be16(skb, attrtype, htons(zone->id)))
153 		goto nla_put_failure;
154 	return 0;
155 
156 nla_put_failure:
157 	return -1;
158 }
159 
160 static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
161 {
162 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
163 		goto nla_put_failure;
164 	return 0;
165 
166 nla_put_failure:
167 	return -1;
168 }
169 
170 static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
171 				  bool skip_zero)
172 {
173 	long timeout = nf_ct_expires(ct) / HZ;
174 
175 	if (skip_zero && timeout == 0)
176 		return 0;
177 
178 	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
179 		goto nla_put_failure;
180 	return 0;
181 
182 nla_put_failure:
183 	return -1;
184 }
185 
186 static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
187 				    bool destroy)
188 {
189 	const struct nf_conntrack_l4proto *l4proto;
190 	struct nlattr *nest_proto;
191 	int ret;
192 
193 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
194 	if (!l4proto->to_nlattr)
195 		return 0;
196 
197 	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
198 	if (!nest_proto)
199 		goto nla_put_failure;
200 
201 	ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);
202 
203 	nla_nest_end(skb, nest_proto);
204 
205 	return ret;
206 
207 nla_put_failure:
208 	return -1;
209 }
210 
211 static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
212 				   const struct nf_conn *ct)
213 {
214 	struct nlattr *nest_helper;
215 	const struct nf_conn_help *help = nfct_help(ct);
216 	struct nf_conntrack_helper *helper;
217 
218 	if (!help)
219 		return 0;
220 
221 	helper = rcu_dereference(help->helper);
222 	if (!helper)
223 		goto out;
224 
225 	nest_helper = nla_nest_start(skb, CTA_HELP);
226 	if (!nest_helper)
227 		goto nla_put_failure;
228 	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
229 		goto nla_put_failure;
230 
231 	if (helper->to_nlattr)
232 		helper->to_nlattr(skb, ct);
233 
234 	nla_nest_end(skb, nest_helper);
235 out:
236 	return 0;
237 
238 nla_put_failure:
239 	return -1;
240 }
241 
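/* Dump the accounting counters for one direction. For an
 * IPCTNL_MSG_CT_GET_CTRZERO request the counters are read and reset
 * atomically.
 */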
242 static int
243 dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
244 	      enum ip_conntrack_dir dir, int type)
245 {
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
247 	struct nf_conn_counter *counter = acct->counter;
248 	struct nlattr *nest_count;
249 	u64 pkts, bytes;
250 
251 	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
252 		pkts = atomic64_xchg(&counter[dir].packets, 0);
253 		bytes = atomic64_xchg(&counter[dir].bytes, 0);
254 	} else {
255 		pkts = atomic64_read(&counter[dir].packets);
256 		bytes = atomic64_read(&counter[dir].bytes);
257 	}
258 
259 	nest_count = nla_nest_start(skb, attr);
260 	if (!nest_count)
261 		goto nla_put_failure;
262 
263 	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
264 			 CTA_COUNTERS_PAD) ||
265 	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
266 			 CTA_COUNTERS_PAD))
267 		goto nla_put_failure;
268 
269 	nla_nest_end(skb, nest_count);
270 
271 	return 0;
272 
273 nla_put_failure:
274 	return -1;
275 }
276 
277 static int
278 ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
279 {
280 	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
281 
282 	if (!acct)
283 		return 0;
284 
285 	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
286 		return -1;
287 	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
288 		return -1;
289 
290 	return 0;
291 }
292 
293 static int
294 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
295 {
296 	struct nlattr *nest_count;
297 	const struct nf_conn_tstamp *tstamp;
298 
299 	tstamp = nf_conn_tstamp_find(ct);
300 	if (!tstamp)
301 		return 0;
302 
303 	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
304 	if (!nest_count)
305 		goto nla_put_failure;
306 
307 	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
308 			 CTA_TIMESTAMP_PAD) ||
309 	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
310 					       cpu_to_be64(tstamp->stop),
311 					       CTA_TIMESTAMP_PAD)))
312 		goto nla_put_failure;
313 	nla_nest_end(skb, nest_count);
314 
315 	return 0;
316 
317 nla_put_failure:
318 	return -1;
319 }
320 
321 #ifdef CONFIG_NF_CONNTRACK_MARK
322 static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
323 {
324 	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
325 		goto nla_put_failure;
326 	return 0;
327 
328 nla_put_failure:
329 	return -1;
330 }
331 #else
332 #define ctnetlink_dump_mark(a, b) (0)
333 #endif
334 
335 #ifdef CONFIG_NF_CONNTRACK_SECMARK
336 static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
337 {
338 	struct nlattr *nest_secctx;
339 	int len, ret;
340 	char *secctx;
341 
342 	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
343 	if (ret)
344 		return 0;
345 
346 	ret = -1;
347 	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
348 	if (!nest_secctx)
349 		goto nla_put_failure;
350 
351 	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
352 		goto nla_put_failure;
353 	nla_nest_end(skb, nest_secctx);
354 
355 	ret = 0;
356 nla_put_failure:
357 	security_release_secctx(secctx, len);
358 	return ret;
359 }
360 #else
361 #define ctnetlink_dump_secctx(a, b) (0)
362 #endif
363 
364 #ifdef CONFIG_NF_CONNTRACK_LABELS
365 static inline int ctnetlink_label_size(const struct nf_conn *ct)
366 {
367 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
368 
369 	if (!labels)
370 		return 0;
371 	return nla_total_size(sizeof(labels->bits));
372 }
373 
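/* Emit CTA_LABELS only if at least one label bit is set. */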
374 static int
375 ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
376 {
377 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
378 	unsigned int i;
379 
380 	if (!labels)
381 		return 0;
382 
383 	i = 0;
384 	do {
385 		if (labels->bits[i] != 0)
386 			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
387 				       labels->bits);
388 		i++;
389 	} while (i < ARRAY_SIZE(labels->bits));
390 
391 	return 0;
392 }
393 #else
394 #define ctnetlink_dump_labels(a, b) (0)
395 #define ctnetlink_label_size(a)	(0)
396 #endif
397 
398 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
399 
400 static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
401 {
402 	struct nlattr *nest_parms;
403 
404 	if (!(ct->status & IPS_EXPECTED))
405 		return 0;
406 
407 	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
408 	if (!nest_parms)
409 		goto nla_put_failure;
410 	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
411 		goto nla_put_failure;
412 	nla_nest_end(skb, nest_parms);
413 
414 	return 0;
415 
416 nla_put_failure:
417 	return -1;
418 }
419 
420 static int
421 dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
422 {
423 	struct nlattr *nest_parms;
424 
425 	nest_parms = nla_nest_start(skb, type);
426 	if (!nest_parms)
427 		goto nla_put_failure;
428 
429 	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
430 			 htonl(seq->correction_pos)) ||
431 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
432 			 htonl(seq->offset_before)) ||
433 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
434 			 htonl(seq->offset_after)))
435 		goto nla_put_failure;
436 
437 	nla_nest_end(skb, nest_parms);
438 
439 	return 0;
440 
441 nla_put_failure:
442 	return -1;
443 }
444 
445 static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
446 {
447 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
448 	struct nf_ct_seqadj *seq;
449 
450 	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
451 		return 0;
452 
453 	spin_lock_bh(&ct->lock);
454 	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
455 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
456 		goto err;
457 
458 	seq = &seqadj->seq[IP_CT_DIR_REPLY];
459 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
460 		goto err;
461 
462 	spin_unlock_bh(&ct->lock);
463 	return 0;
464 err:
465 	spin_unlock_bh(&ct->lock);
466 	return -1;
467 }
468 
469 static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
470 {
471 	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
472 	struct nlattr *nest_parms;
473 
474 	if (!synproxy)
475 		return 0;
476 
477 	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
478 	if (!nest_parms)
479 		goto nla_put_failure;
480 
481 	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
482 	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
483 	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
484 		goto nla_put_failure;
485 
486 	nla_nest_end(skb, nest_parms);
487 
488 	return 0;
489 
490 nla_put_failure:
491 	return -1;
492 }
493 
494 static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
495 {
496 	__be32 id = (__force __be32)nf_ct_get_id(ct);
497 
498 	if (nla_put_be32(skb, CTA_ID, id))
499 		goto nla_put_failure;
500 	return 0;
501 
502 nla_put_failure:
503 	return -1;
504 }
505 
506 static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
507 {
508 	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
509 		goto nla_put_failure;
510 	return 0;
511 
512 nla_put_failure:
513 	return -1;
514 }
515 
516 /* all these functions access ct->ext. Caller must either hold a reference
517  * on ct or prevent its deletion by holding either the bucket spinlock or
518  * pcpu dying list lock.
519  */
520 static int ctnetlink_dump_extinfo(struct sk_buff *skb,
521 				  struct nf_conn *ct, u32 type)
522 {
523 	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
524 	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
525 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
526 	    ctnetlink_dump_labels(skb, ct) < 0 ||
527 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
528 	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
529 		return -1;
530 
531 	return 0;
532 }
533 
534 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
535 {
536 	if (ctnetlink_dump_status(skb, ct) < 0 ||
537 	    ctnetlink_dump_mark(skb, ct) < 0 ||
538 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
539 	    ctnetlink_dump_id(skb, ct) < 0 ||
540 	    ctnetlink_dump_use(skb, ct) < 0 ||
541 	    ctnetlink_dump_master(skb, ct) < 0)
542 		return -1;
543 
544 	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
545 	    (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
546 	     ctnetlink_dump_protoinfo(skb, ct, false) < 0))
547 		return -1;
548 
549 	return 0;
550 }
551 
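/* Fill one complete conntrack message: both tuples with their zones, the
 * global zone, the core state and, if @extinfo is true, the extension
 * data (accounting, timestamps, helper, labels, seqadj, synproxy).
 */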
552 static int
553 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
554 		    struct nf_conn *ct, bool extinfo, unsigned int flags)
555 {
556 	const struct nf_conntrack_zone *zone;
557 	struct nlmsghdr *nlh;
558 	struct nfgenmsg *nfmsg;
559 	struct nlattr *nest_parms;
560 	unsigned int event;
561 
562 	if (portid)
563 		flags |= NLM_F_MULTI;
564 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
565 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
566 	if (nlh == NULL)
567 		goto nlmsg_failure;
568 
569 	nfmsg = nlmsg_data(nlh);
570 	nfmsg->nfgen_family = nf_ct_l3num(ct);
571 	nfmsg->version      = NFNETLINK_V0;
572 	nfmsg->res_id	    = 0;
573 
574 	zone = nf_ct_zone(ct);
575 
576 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
577 	if (!nest_parms)
578 		goto nla_put_failure;
579 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
580 		goto nla_put_failure;
581 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
582 				   NF_CT_ZONE_DIR_ORIG) < 0)
583 		goto nla_put_failure;
584 	nla_nest_end(skb, nest_parms);
585 
586 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
587 	if (!nest_parms)
588 		goto nla_put_failure;
589 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
590 		goto nla_put_failure;
591 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
592 				   NF_CT_ZONE_DIR_REPL) < 0)
593 		goto nla_put_failure;
594 	nla_nest_end(skb, nest_parms);
595 
596 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
597 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
598 		goto nla_put_failure;
599 
600 	if (ctnetlink_dump_info(skb, ct) < 0)
601 		goto nla_put_failure;
602 	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
603 		goto nla_put_failure;
604 
605 	nlmsg_end(skb, nlh);
606 	return skb->len;
607 
608 nlmsg_failure:
609 nla_put_failure:
610 	nlmsg_cancel(skb, nlh);
611 	return -1;
612 }
613 
614 static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
615 	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
616 	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
617 	[CTA_IP_V6_SRC]	= { .len = sizeof(__be32) * 4 },
618 	[CTA_IP_V6_DST]	= { .len = sizeof(__be32) * 4 },
619 };
620 
621 #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
622 static size_t ctnetlink_proto_size(const struct nf_conn *ct)
623 {
624 	const struct nf_conntrack_l4proto *l4proto;
625 	size_t len, len4 = 0;
626 
627 	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
628 	len *= 3u; /* ORIG, REPLY, MASTER */
629 
630 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
631 	len += l4proto->nlattr_size;
632 	if (l4proto->nlattr_tuple_size) {
633 		len4 = l4proto->nlattr_tuple_size();
634 		len4 *= 3u; /* ORIG, REPLY, MASTER */
635 	}
636 
637 	return len + len4;
638 }
639 #endif
640 
641 static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
642 {
643 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
644 		return 0;
645 	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
646 	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
647 	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
648 	       ;
649 }
650 
651 static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
652 {
653 #ifdef CONFIG_NF_CONNTRACK_SECMARK
654 	int len, ret;
655 
656 	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
657 	if (ret)
658 		return 0;
659 
660 	return nla_total_size(0) /* CTA_SECCTX */
661 	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
662 #else
663 	return 0;
664 #endif
665 }
666 
667 static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
668 {
669 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
670 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
671 		return 0;
672 	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
673 #else
674 	return 0;
675 #endif
676 }
677 
678 #ifdef CONFIG_NF_CONNTRACK_EVENTS
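/* Worst-case message size, used to size the skb allocated for event
 * notifications in ctnetlink_conntrack_event().
 */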
679 static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
680 {
681 	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
682 	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
683 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
684 	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
685 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
686 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
687 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
688 	       + ctnetlink_acct_size(ct)
689 	       + ctnetlink_timestamp_size(ct)
690 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
691 	       + nla_total_size(0) /* CTA_PROTOINFO */
692 	       + nla_total_size(0) /* CTA_HELP */
693 	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
694 	       + ctnetlink_secctx_size(ct)
695 #if IS_ENABLED(CONFIG_NF_NAT)
696 	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
697 	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
698 #endif
699 #ifdef CONFIG_NF_CONNTRACK_MARK
700 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
701 #endif
702 #ifdef CONFIG_NF_CONNTRACK_ZONES
703 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
704 #endif
705 	       + ctnetlink_proto_size(ct)
706 	       + ctnetlink_label_size(ct)
707 	       ;
708 }
709 
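/* Build an event message for the given conntrack and multicast it to the
 * matching NFNLGRP_CONNTRACK_* group. Destroy events carry the final
 * counters and protocol state; other events include the changed attributes
 * on top of the basic tuple, status and timeout information.
 */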
710 static int
711 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
712 {
713 	const struct nf_conntrack_zone *zone;
714 	struct net *net;
715 	struct nlmsghdr *nlh;
716 	struct nfgenmsg *nfmsg;
717 	struct nlattr *nest_parms;
718 	struct nf_conn *ct = item->ct;
719 	struct sk_buff *skb;
720 	unsigned int type;
721 	unsigned int flags = 0, group;
722 	int err;
723 
724 	if (events & (1 << IPCT_DESTROY)) {
725 		type = IPCTNL_MSG_CT_DELETE;
726 		group = NFNLGRP_CONNTRACK_DESTROY;
727 	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
728 		type = IPCTNL_MSG_CT_NEW;
729 		flags = NLM_F_CREATE|NLM_F_EXCL;
730 		group = NFNLGRP_CONNTRACK_NEW;
731 	} else if (events) {
732 		type = IPCTNL_MSG_CT_NEW;
733 		group = NFNLGRP_CONNTRACK_UPDATE;
734 	} else
735 		return 0;
736 
737 	net = nf_ct_net(ct);
738 	if (!item->report && !nfnetlink_has_listeners(net, group))
739 		return 0;
740 
741 	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
742 	if (skb == NULL)
743 		goto errout;
744 
745 	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
746 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
747 	if (nlh == NULL)
748 		goto nlmsg_failure;
749 
750 	nfmsg = nlmsg_data(nlh);
751 	nfmsg->nfgen_family = nf_ct_l3num(ct);
752 	nfmsg->version	= NFNETLINK_V0;
753 	nfmsg->res_id	= 0;
754 
755 	zone = nf_ct_zone(ct);
756 
757 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
758 	if (!nest_parms)
759 		goto nla_put_failure;
760 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
761 		goto nla_put_failure;
762 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
763 				   NF_CT_ZONE_DIR_ORIG) < 0)
764 		goto nla_put_failure;
765 	nla_nest_end(skb, nest_parms);
766 
767 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
768 	if (!nest_parms)
769 		goto nla_put_failure;
770 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
771 		goto nla_put_failure;
772 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
773 				   NF_CT_ZONE_DIR_REPL) < 0)
774 		goto nla_put_failure;
775 	nla_nest_end(skb, nest_parms);
776 
777 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
778 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
779 		goto nla_put_failure;
780 
781 	if (ctnetlink_dump_id(skb, ct) < 0)
782 		goto nla_put_failure;
783 
784 	if (ctnetlink_dump_status(skb, ct) < 0)
785 		goto nla_put_failure;
786 
787 	if (events & (1 << IPCT_DESTROY)) {
788 		if (ctnetlink_dump_timeout(skb, ct, true) < 0)
789 			goto nla_put_failure;
790 
791 		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
792 		    ctnetlink_dump_timestamp(skb, ct) < 0 ||
793 		    ctnetlink_dump_protoinfo(skb, ct, true) < 0)
794 			goto nla_put_failure;
795 	} else {
796 		if (ctnetlink_dump_timeout(skb, ct, false) < 0)
797 			goto nla_put_failure;
798 
799 		if (events & (1 << IPCT_PROTOINFO) &&
800 		    ctnetlink_dump_protoinfo(skb, ct, false) < 0)
801 			goto nla_put_failure;
802 
803 		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
804 		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
805 			goto nla_put_failure;
806 
807 #ifdef CONFIG_NF_CONNTRACK_SECMARK
808 		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
809 		    && ctnetlink_dump_secctx(skb, ct) < 0)
810 			goto nla_put_failure;
811 #endif
812 		if (events & (1 << IPCT_LABEL) &&
813 		     ctnetlink_dump_labels(skb, ct) < 0)
814 			goto nla_put_failure;
815 
816 		if (events & (1 << IPCT_RELATED) &&
817 		    ctnetlink_dump_master(skb, ct) < 0)
818 			goto nla_put_failure;
819 
820 		if (events & (1 << IPCT_SEQADJ) &&
821 		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
822 			goto nla_put_failure;
823 
824 		if (events & (1 << IPCT_SYNPROXY) &&
825 		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
826 			goto nla_put_failure;
827 	}
828 
829 #ifdef CONFIG_NF_CONNTRACK_MARK
830 	if ((events & (1 << IPCT_MARK) || ct->mark)
831 	    && ctnetlink_dump_mark(skb, ct) < 0)
832 		goto nla_put_failure;
833 #endif
834 	nlmsg_end(skb, nlh);
835 	err = nfnetlink_send(skb, net, item->portid, group, item->report,
836 			     GFP_ATOMIC);
837 	if (err == -ENOBUFS || err == -EAGAIN)
838 		return -ENOBUFS;
839 
840 	return 0;
841 
842 nla_put_failure:
843 	nlmsg_cancel(skb, nlh);
844 nlmsg_failure:
845 	kfree_skb(skb);
846 errout:
847 	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
848 		return -ENOBUFS;
849 
850 	return 0;
851 }
852 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
853 
854 static int ctnetlink_done(struct netlink_callback *cb)
855 {
856 	if (cb->args[1])
857 		nf_ct_put((struct nf_conn *)cb->args[1]);
858 	kfree(cb->data);
859 	return 0;
860 }
861 
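/* Filter used by dump and flush requests, built from the CTA_* attributes
 * in ctnetlink_alloc_filter().
 */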
862 struct ctnetlink_filter {
863 	u8 family;
864 
865 	u_int32_t orig_flags;
866 	u_int32_t reply_flags;
867 
868 	struct nf_conntrack_tuple orig;
869 	struct nf_conntrack_tuple reply;
870 	struct nf_conntrack_zone zone;
871 
872 	struct {
873 		u_int32_t val;
874 		u_int32_t mask;
875 	} mark;
876 };
877 
878 static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
879 	[CTA_FILTER_ORIG_FLAGS]		= { .type = NLA_U32 },
880 	[CTA_FILTER_REPLY_FLAGS]	= { .type = NLA_U32 },
881 };
882 
883 static int ctnetlink_parse_filter(const struct nlattr *attr,
884 				  struct ctnetlink_filter *filter)
885 {
886 	struct nlattr *tb[CTA_FILTER_MAX + 1];
887 	int ret = 0;
888 
889 	ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
890 			       NULL);
891 	if (ret)
892 		return ret;
893 
894 	if (tb[CTA_FILTER_ORIG_FLAGS]) {
895 		filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
896 		if (filter->orig_flags & ~CTA_FILTER_F_ALL)
897 			return -EOPNOTSUPP;
898 	}
899 
900 	if (tb[CTA_FILTER_REPLY_FLAGS]) {
901 		filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
902 		if (filter->reply_flags & ~CTA_FILTER_F_ALL)
903 			return -EOPNOTSUPP;
904 	}
905 
906 	return 0;
907 }
908 
909 static int ctnetlink_parse_zone(const struct nlattr *attr,
910 				struct nf_conntrack_zone *zone);
911 static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
912 					 struct nf_conntrack_tuple *tuple,
913 					 u32 type, u_int8_t l3num,
914 					 struct nf_conntrack_zone *zone,
915 					 u_int32_t flags);
916 
917 static struct ctnetlink_filter *
918 ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
919 {
920 	struct ctnetlink_filter *filter;
921 	int err;
922 
923 #ifndef CONFIG_NF_CONNTRACK_MARK
924 	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
925 		return ERR_PTR(-EOPNOTSUPP);
926 #endif
927 
928 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
929 	if (filter == NULL)
930 		return ERR_PTR(-ENOMEM);
931 
932 	filter->family = family;
933 
934 #ifdef CONFIG_NF_CONNTRACK_MARK
935 	if (cda[CTA_MARK]) {
936 		filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
937 		if (cda[CTA_MARK_MASK])
938 			filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
939 		else
940 			filter->mark.mask = 0xffffffff;
941 	} else if (cda[CTA_MARK_MASK]) {
942 		err = -EINVAL;
943 		goto err_filter;
944 	}
945 #endif
946 	if (!cda[CTA_FILTER])
947 		return filter;
948 
949 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
950 	if (err < 0)
951 		goto err_filter;
952 
953 	err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
954 	if (err < 0)
955 		goto err_filter;
956 
957 	if (filter->orig_flags) {
958 		if (!cda[CTA_TUPLE_ORIG]) {
959 			err = -EINVAL;
960 			goto err_filter;
961 		}
962 
963 		err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
964 						   CTA_TUPLE_ORIG,
965 						   filter->family,
966 						   &filter->zone,
967 						   filter->orig_flags);
968 		if (err < 0)
969 			goto err_filter;
970 	}
971 
972 	if (filter->reply_flags) {
973 		if (!cda[CTA_TUPLE_REPLY]) {
974 			err = -EINVAL;
975 			goto err_filter;
976 		}
977 
978 		err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
979 						   CTA_TUPLE_REPLY,
980 						   filter->family,
981 						   &filter->zone,
						   filter->reply_flags);
		if (err < 0)
			goto err_filter;
987 	}
988 
989 	return filter;
990 
991 err_filter:
992 	kfree(filter);
993 
994 	return ERR_PTR(err);
995 }
996 
997 static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
998 {
999 	return family || cda[CTA_MARK] || cda[CTA_FILTER];
1000 }
1001 
1002 static int ctnetlink_start(struct netlink_callback *cb)
1003 {
1004 	const struct nlattr * const *cda = cb->data;
1005 	struct ctnetlink_filter *filter = NULL;
1006 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1007 	u8 family = nfmsg->nfgen_family;
1008 
1009 	if (ctnetlink_needs_filter(family, cda)) {
1010 		filter = ctnetlink_alloc_filter(cda, family);
1011 		if (IS_ERR(filter))
1012 			return PTR_ERR(filter);
1013 	}
1014 
1015 	cb->data = filter;
1016 	return 0;
1017 }
1018 
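/* Compare only the tuple fields selected by the CTA_FILTER_FLAG() bits;
 * returns 1 if the conntrack tuple matches the filter, 0 otherwise.
 */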
1019 static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
1020 					struct nf_conntrack_tuple *ct_tuple,
1021 					u_int32_t flags, int family)
1022 {
1023 	switch (family) {
1024 	case NFPROTO_IPV4:
1025 		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
1026 		    filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
1027 			return  0;
1028 
1029 		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
1030 		    filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
1031 			return  0;
1032 		break;
1033 	case NFPROTO_IPV6:
1034 		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
1035 		    !ipv6_addr_cmp(&filter_tuple->src.u3.in6,
1036 				   &ct_tuple->src.u3.in6))
1037 			return 0;
1038 
1039 		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
1040 		    !ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
1041 				   &ct_tuple->dst.u3.in6))
1042 			return 0;
1043 		break;
1044 	}
1045 
1046 	if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
1047 	    filter_tuple->dst.protonum != ct_tuple->dst.protonum)
1048 		return 0;
1049 
1050 	switch (ct_tuple->dst.protonum) {
1051 	case IPPROTO_TCP:
1052 	case IPPROTO_UDP:
1053 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
1054 		    filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
1055 			return 0;
1056 
1057 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
1058 		    filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
1059 			return 0;
1060 		break;
1061 	case IPPROTO_ICMP:
1062 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
1063 		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
1064 			return 0;
1065 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
1066 		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
1067 			return 0;
1068 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
1069 		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
1070 			return 0;
1071 		break;
1072 	case IPPROTO_ICMPV6:
1073 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
1074 		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
1075 			return 0;
1076 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
1077 		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
1078 			return 0;
1079 		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
1080 		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
1081 			return 0;
1082 		break;
1083 	}
1084 
1085 	return 1;
1086 }
1087 
1088 static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
1089 {
1090 	struct ctnetlink_filter *filter = data;
1091 	struct nf_conntrack_tuple *tuple;
1092 
1093 	if (filter == NULL)
1094 		goto out;
1095 
1096 	/* Match entries of a given L3 protocol number.
	 * If it is not specified, i.e. l3proto == 0,
1098 	 * then match everything.
1099 	 */
1100 	if (filter->family && nf_ct_l3num(ct) != filter->family)
1101 		goto ignore_entry;
1102 
1103 	if (filter->orig_flags) {
1104 		tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
1105 		if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
1106 						  filter->orig_flags,
1107 						  filter->family))
1108 			goto ignore_entry;
1109 	}
1110 
1111 	if (filter->reply_flags) {
1112 		tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
1113 		if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
1114 						  filter->reply_flags,
1115 						  filter->family))
1116 			goto ignore_entry;
1117 	}
1118 
1119 #ifdef CONFIG_NF_CONNTRACK_MARK
1120 	if ((ct->mark & filter->mark.mask) != filter->mark.val)
1121 		goto ignore_entry;
1122 #endif
1123 
1124 out:
1125 	return 1;
1126 
1127 ignore_entry:
1128 	return 0;
1129 }
1130 
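/* Walk the conntrack hash bucket by bucket under the per-bucket lock.
 * cb->args[0] holds the current bucket and cb->args[1] a reference to the
 * last entry that did not fit, so the dump can resume there. Expired
 * entries found on the way are collected in nf_ct_evict[] and garbage
 * collected outside the lock.
 */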
1131 static int
1132 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1133 {
1134 	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
1135 	struct net *net = sock_net(skb->sk);
1136 	struct nf_conn *ct, *last;
1137 	struct nf_conntrack_tuple_hash *h;
1138 	struct hlist_nulls_node *n;
1139 	struct nf_conn *nf_ct_evict[8];
1140 	int res, i;
1141 	spinlock_t *lockp;
1142 
1143 	last = (struct nf_conn *)cb->args[1];
1144 	i = 0;
1145 
1146 	local_bh_disable();
1147 	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
1148 restart:
1149 		while (i) {
1150 			i--;
1151 			if (nf_ct_should_gc(nf_ct_evict[i]))
1152 				nf_ct_kill(nf_ct_evict[i]);
1153 			nf_ct_put(nf_ct_evict[i]);
1154 		}
1155 
1156 		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
1157 		nf_conntrack_lock(lockp);
1158 		if (cb->args[0] >= nf_conntrack_htable_size) {
1159 			spin_unlock(lockp);
1160 			goto out;
1161 		}
1162 		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
1163 					   hnnode) {
1164 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1165 				continue;
1166 			ct = nf_ct_tuplehash_to_ctrack(h);
1167 			if (nf_ct_is_expired(ct)) {
1168 				if (i < ARRAY_SIZE(nf_ct_evict) &&
1169 				    atomic_inc_not_zero(&ct->ct_general.use))
1170 					nf_ct_evict[i++] = ct;
1171 				continue;
1172 			}
1173 
1174 			if (!net_eq(net, nf_ct_net(ct)))
1175 				continue;
1176 
1177 			if (cb->args[1]) {
1178 				if (ct != last)
1179 					continue;
1180 				cb->args[1] = 0;
1181 			}
1182 			if (!ctnetlink_filter_match(ct, cb->data))
1183 				continue;
1184 
1185 			res =
1186 			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1187 					    cb->nlh->nlmsg_seq,
1188 					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1189 					    ct, true, flags);
1190 			if (res < 0) {
1191 				nf_conntrack_get(&ct->ct_general);
1192 				cb->args[1] = (unsigned long)ct;
1193 				spin_unlock(lockp);
1194 				goto out;
1195 			}
1196 		}
1197 		spin_unlock(lockp);
1198 		if (cb->args[1]) {
1199 			cb->args[1] = 0;
1200 			goto restart;
1201 		}
1202 	}
1203 out:
1204 	local_bh_enable();
1205 	if (last) {
1206 		/* nf ct hash resize happened, now clear the leftover. */
1207 		if ((struct nf_conn *)cb->args[1] == last)
1208 			cb->args[1] = 0;
1209 
1210 		nf_ct_put(last);
1211 	}
1212 
1213 	while (i) {
1214 		i--;
1215 		if (nf_ct_should_gc(nf_ct_evict[i]))
1216 			nf_ct_kill(nf_ct_evict[i]);
1217 		nf_ct_put(nf_ct_evict[i]);
1218 	}
1219 
1220 	return skb->len;
1221 }
1222 
1223 static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
1224 				struct nf_conntrack_tuple *t,
1225 				u_int32_t flags)
1226 {
1227 	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1228 		if (!tb[CTA_IP_V4_SRC])
1229 			return -EINVAL;
1230 
1231 		t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
1232 	}
1233 
1234 	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
1235 		if (!tb[CTA_IP_V4_DST])
1236 			return -EINVAL;
1237 
1238 		t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
1239 	}
1240 
1241 	return 0;
1242 }
1243 
1244 static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
1245 				struct nf_conntrack_tuple *t,
1246 				u_int32_t flags)
1247 {
1248 	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1249 		if (!tb[CTA_IP_V6_SRC])
1250 			return -EINVAL;
1251 
1252 		t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
1253 	}
1254 
1255 	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
1256 		if (!tb[CTA_IP_V6_DST])
1257 			return -EINVAL;
1258 
1259 		t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
1266 				    struct nf_conntrack_tuple *tuple,
1267 				    u_int32_t flags)
1268 {
1269 	struct nlattr *tb[CTA_IP_MAX+1];
1270 	int ret = 0;
1271 
1272 	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL);
1273 	if (ret < 0)
1274 		return ret;
1275 
1276 	ret = nla_validate_nested_deprecated(attr, CTA_IP_MAX,
1277 					     cta_ip_nla_policy, NULL);
1278 	if (ret)
1279 		return ret;
1280 
1281 	switch (tuple->src.l3num) {
1282 	case NFPROTO_IPV4:
1283 		ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
1284 		break;
1285 	case NFPROTO_IPV6:
1286 		ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
1287 		break;
1288 	}
1289 
1290 	return ret;
1291 }
1292 
1293 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
1294 	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
1295 };
1296 
1297 static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
1298 				       struct nf_conntrack_tuple *tuple,
1299 				       u_int32_t flags)
1300 {
1301 	const struct nf_conntrack_l4proto *l4proto;
1302 	struct nlattr *tb[CTA_PROTO_MAX+1];
1303 	int ret = 0;
1304 
1305 	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
1306 					  proto_nla_policy, NULL);
1307 	if (ret < 0)
1308 		return ret;
1309 
1310 	if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
1311 		return 0;
1312 
1313 	if (!tb[CTA_PROTO_NUM])
1314 		return -EINVAL;
1315 
1316 	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
1317 
1318 	rcu_read_lock();
1319 	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
1320 
1321 	if (likely(l4proto->nlattr_to_tuple)) {
1322 		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
1323 						     l4proto->nla_policy,
1324 						     NULL);
1325 		if (ret == 0)
1326 			ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
1327 	}
1328 
1329 	rcu_read_unlock();
1330 
1331 	return ret;
1332 }
1333 
1334 static int
1335 ctnetlink_parse_zone(const struct nlattr *attr,
1336 		     struct nf_conntrack_zone *zone)
1337 {
1338 	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
1339 			NF_CT_DEFAULT_ZONE_DIR, 0);
1340 #ifdef CONFIG_NF_CONNTRACK_ZONES
1341 	if (attr)
1342 		zone->id = ntohs(nla_get_be16(attr));
1343 #else
1344 	if (attr)
1345 		return -EOPNOTSUPP;
1346 #endif
1347 	return 0;
1348 }
1349 
1350 static int
1351 ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
1352 			   struct nf_conntrack_zone *zone)
1353 {
1354 	int ret;
1355 
1356 	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
1357 		return -EINVAL;
1358 
1359 	ret = ctnetlink_parse_zone(attr, zone);
1360 	if (ret < 0)
1361 		return ret;
1362 
1363 	if (type == CTA_TUPLE_REPLY)
1364 		zone->dir = NF_CT_ZONE_DIR_REPL;
1365 	else
1366 		zone->dir = NF_CT_ZONE_DIR_ORIG;
1367 
1368 	return 0;
1369 }
1370 
1371 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1372 	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
1373 	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
1374 	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
1375 };
1376 
1377 #define CTA_FILTER_F_ALL_CTA_PROTO \
1378   (CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
1379    CTA_FILTER_F_CTA_PROTO_DST_PORT | \
1380    CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
1381    CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
1382    CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
1383    CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
1384    CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
1385    CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)
1386 
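/* Parse a CTA_TUPLE_* nest into a conntrack tuple. The filter flags decide
 * which parts (IP addresses, protocol, zone) must be present and are
 * copied into the tuple.
 */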
1387 static int
1388 ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
1389 			      struct nf_conntrack_tuple *tuple, u32 type,
1390 			      u_int8_t l3num, struct nf_conntrack_zone *zone,
1391 			      u_int32_t flags)
1392 {
1393 	struct nlattr *tb[CTA_TUPLE_MAX+1];
1394 	int err;
1395 
1396 	memset(tuple, 0, sizeof(*tuple));
1397 
1398 	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
1399 					  tuple_nla_policy, NULL);
1400 	if (err < 0)
1401 		return err;
1402 
1403 	if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
1404 		return -EOPNOTSUPP;
1405 	tuple->src.l3num = l3num;
1406 
1407 	if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
1408 	    flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1409 		if (!tb[CTA_TUPLE_IP])
1410 			return -EINVAL;
1411 
1412 		err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
1413 		if (err < 0)
1414 			return err;
1415 	}
1416 
1417 	if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
1418 		if (!tb[CTA_TUPLE_PROTO])
1419 			return -EINVAL;
1420 
1421 		err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
1422 		if (err < 0)
1423 			return err;
1424 	} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
1425 		/* Can't manage proto flags without a protonum  */
1426 		return -EINVAL;
1427 	}
1428 
1429 	if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
1430 		if (!zone)
1431 			return -EINVAL;
1432 
1433 		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
1434 						 type, zone);
1435 		if (err < 0)
1436 			return err;
1437 	}
1438 
1439 	/* orig and expect tuples get DIR_ORIGINAL */
1440 	if (type == CTA_TUPLE_REPLY)
1441 		tuple->dst.dir = IP_CT_DIR_REPLY;
1442 	else
1443 		tuple->dst.dir = IP_CT_DIR_ORIGINAL;
1444 
1445 	return 0;
1446 }
1447 
1448 static int
1449 ctnetlink_parse_tuple(const struct nlattr * const cda[],
1450 		      struct nf_conntrack_tuple *tuple, u32 type,
1451 		      u_int8_t l3num, struct nf_conntrack_zone *zone)
1452 {
1453 	return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
1454 					    CTA_FILTER_FLAG(ALL));
1455 }
1456 
1457 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
1458 	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
1459 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
1460 };
1461 
1462 static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
1463 				struct nlattr **helpinfo)
1464 {
1465 	int err;
1466 	struct nlattr *tb[CTA_HELP_MAX+1];
1467 
1468 	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
1469 					  help_nla_policy, NULL);
1470 	if (err < 0)
1471 		return err;
1472 
1473 	if (!tb[CTA_HELP_NAME])
1474 		return -EINVAL;
1475 
1476 	*helper_name = nla_data(tb[CTA_HELP_NAME]);
1477 
1478 	if (tb[CTA_HELP_INFO])
1479 		*helpinfo = tb[CTA_HELP_INFO];
1480 
1481 	return 0;
1482 }
1483 
1484 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
1485 	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
1486 	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
1487 	[CTA_STATUS] 		= { .type = NLA_U32 },
1488 	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
1489 	[CTA_HELP]		= { .type = NLA_NESTED },
1490 	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
1491 	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
1492 	[CTA_MARK]		= { .type = NLA_U32 },
1493 	[CTA_ID]		= { .type = NLA_U32 },
1494 	[CTA_NAT_DST]		= { .type = NLA_NESTED },
1495 	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
1496 	[CTA_NAT_SEQ_ADJ_ORIG]  = { .type = NLA_NESTED },
1497 	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
1498 	[CTA_ZONE]		= { .type = NLA_U16 },
1499 	[CTA_MARK_MASK]		= { .type = NLA_U32 },
1500 	[CTA_LABELS]		= { .type = NLA_BINARY,
1501 				    .len = NF_CT_LABELS_MAX_SIZE },
1502 	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
1503 				    .len = NF_CT_LABELS_MAX_SIZE },
1504 	[CTA_FILTER]		= { .type = NLA_NESTED },
1505 };
1506 
1507 static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
1508 {
1509 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
1510 		return 0;
1511 
1512 	return ctnetlink_filter_match(ct, data);
1513 }
1514 
1515 static int ctnetlink_flush_conntrack(struct net *net,
1516 				     const struct nlattr * const cda[],
1517 				     u32 portid, int report, u8 family)
1518 {
1519 	struct ctnetlink_filter *filter = NULL;
1520 
1521 	if (ctnetlink_needs_filter(family, cda)) {
1522 		if (cda[CTA_FILTER])
1523 			return -EOPNOTSUPP;
1524 
1525 		filter = ctnetlink_alloc_filter(cda, family);
1526 		if (IS_ERR(filter))
1527 			return PTR_ERR(filter);
1528 	}
1529 
1530 	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
1531 				  portid, report);
1532 	kfree(filter);
1533 
1534 	return 0;
1535 }
1536 
1537 static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
1538 				   struct sk_buff *skb,
1539 				   const struct nlmsghdr *nlh,
1540 				   const struct nlattr * const cda[],
1541 				   struct netlink_ext_ack *extack)
1542 {
1543 	struct nf_conntrack_tuple_hash *h;
1544 	struct nf_conntrack_tuple tuple;
1545 	struct nf_conn *ct;
1546 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1547 	struct nf_conntrack_zone zone;
1548 	int err;
1549 
1550 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1551 	if (err < 0)
1552 		return err;
1553 
1554 	if (cda[CTA_TUPLE_ORIG])
1555 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1556 					    nfmsg->nfgen_family, &zone);
1557 	else if (cda[CTA_TUPLE_REPLY])
1558 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1559 					    nfmsg->nfgen_family, &zone);
1560 	else {
1561 		u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
1562 
1563 		return ctnetlink_flush_conntrack(net, cda,
1564 						 NETLINK_CB(skb).portid,
1565 						 nlmsg_report(nlh), u3);
1566 	}
1567 
1568 	if (err < 0)
1569 		return err;
1570 
1571 	h = nf_conntrack_find_get(net, &zone, &tuple);
1572 	if (!h)
1573 		return -ENOENT;
1574 
1575 	ct = nf_ct_tuplehash_to_ctrack(h);
1576 
1577 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
1578 		nf_ct_put(ct);
1579 		return -EBUSY;
1580 	}
1581 
1582 	if (cda[CTA_ID]) {
1583 		__be32 id = nla_get_be32(cda[CTA_ID]);
1584 
1585 		if (id != (__force __be32)nf_ct_get_id(ct)) {
1586 			nf_ct_put(ct);
1587 			return -ENOENT;
1588 		}
1589 	}
1590 
1591 	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1592 	nf_ct_put(ct);
1593 
1594 	return 0;
1595 }
1596 
1597 static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
1598 				   struct sk_buff *skb,
1599 				   const struct nlmsghdr *nlh,
1600 				   const struct nlattr * const cda[],
1601 				   struct netlink_ext_ack *extack)
1602 {
1603 	struct nf_conntrack_tuple_hash *h;
1604 	struct nf_conntrack_tuple tuple;
1605 	struct nf_conn *ct;
1606 	struct sk_buff *skb2 = NULL;
1607 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1608 	u_int8_t u3 = nfmsg->nfgen_family;
1609 	struct nf_conntrack_zone zone;
1610 	int err;
1611 
1612 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1613 		struct netlink_dump_control c = {
1614 			.start = ctnetlink_start,
1615 			.dump = ctnetlink_dump_table,
1616 			.done = ctnetlink_done,
1617 			.data = (void *)cda,
1618 		};
1619 
1620 		return netlink_dump_start(ctnl, skb, nlh, &c);
1621 	}
1622 
1623 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1624 	if (err < 0)
1625 		return err;
1626 
1627 	if (cda[CTA_TUPLE_ORIG])
1628 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1629 					    u3, &zone);
1630 	else if (cda[CTA_TUPLE_REPLY])
1631 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1632 					    u3, &zone);
1633 	else
1634 		return -EINVAL;
1635 
1636 	if (err < 0)
1637 		return err;
1638 
1639 	h = nf_conntrack_find_get(net, &zone, &tuple);
1640 	if (!h)
1641 		return -ENOENT;
1642 
1643 	ct = nf_ct_tuplehash_to_ctrack(h);
1644 
1645 	err = -ENOMEM;
1646 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1647 	if (skb2 == NULL) {
1648 		nf_ct_put(ct);
1649 		return -ENOMEM;
1650 	}
1651 
1652 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1653 				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true, 0);
1654 	nf_ct_put(ct);
1655 	if (err <= 0)
1656 		goto free;
1657 
1658 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1659 	if (err < 0)
1660 		goto out;
1661 
1662 	return 0;
1663 
1664 free:
1665 	kfree_skb(skb2);
1666 out:
1667 	/* this avoids a loop in nfnetlink. */
1668 	return err == -EAGAIN ? -ENOBUFS : err;
1669 }
1670 
1671 static int ctnetlink_done_list(struct netlink_callback *cb)
1672 {
1673 	if (cb->args[1])
1674 		nf_ct_put((struct nf_conn *)cb->args[1]);
1675 	return 0;
1676 }
1677 
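/* Walk the per-cpu dying or unconfirmed lists. cb->args[0] holds the cpu
 * and cb->args[1] the last dumped entry so the walk can resume after a
 * partial dump.
 */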
1678 static int
1679 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1680 {
1681 	struct nf_conn *ct, *last;
1682 	struct nf_conntrack_tuple_hash *h;
1683 	struct hlist_nulls_node *n;
1684 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1685 	u_int8_t l3proto = nfmsg->nfgen_family;
1686 	int res;
1687 	int cpu;
1688 	struct hlist_nulls_head *list;
1689 	struct net *net = sock_net(skb->sk);
1690 
1691 	if (cb->args[2])
1692 		return 0;
1693 
1694 	last = (struct nf_conn *)cb->args[1];
1695 
1696 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1697 		struct ct_pcpu *pcpu;
1698 
1699 		if (!cpu_possible(cpu))
1700 			continue;
1701 
1702 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1703 		spin_lock_bh(&pcpu->lock);
1704 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1705 restart:
1706 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
1707 			ct = nf_ct_tuplehash_to_ctrack(h);
1708 			if (l3proto && nf_ct_l3num(ct) != l3proto)
1709 				continue;
1710 			if (cb->args[1]) {
1711 				if (ct != last)
1712 					continue;
1713 				cb->args[1] = 0;
1714 			}
1715 
1716 			/* We can't dump extension info for the unconfirmed
1717 			 * list because unconfirmed conntracks can have
1718 			 * ct->ext reallocated (and thus freed).
1719 			 *
			 * In the dying list case ct->ext can't be freed
1721 			 * until after we drop pcpu->lock.
1722 			 */
1723 			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1724 						  cb->nlh->nlmsg_seq,
1725 						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1726 						  ct, dying ? true : false, 0);
1727 			if (res < 0) {
1728 				if (!atomic_inc_not_zero(&ct->ct_general.use))
1729 					continue;
1730 				cb->args[0] = cpu;
1731 				cb->args[1] = (unsigned long)ct;
1732 				spin_unlock_bh(&pcpu->lock);
1733 				goto out;
1734 			}
1735 		}
1736 		if (cb->args[1]) {
1737 			cb->args[1] = 0;
1738 			goto restart;
1739 		}
1740 		spin_unlock_bh(&pcpu->lock);
1741 	}
1742 	cb->args[2] = 1;
1743 out:
1744 	if (last)
1745 		nf_ct_put(last);
1746 
1747 	return skb->len;
1748 }
1749 
1750 static int
1751 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1752 {
1753 	return ctnetlink_dump_list(skb, cb, true);
1754 }
1755 
1756 static int ctnetlink_get_ct_dying(struct net *net, struct sock *ctnl,
1757 				  struct sk_buff *skb,
1758 				  const struct nlmsghdr *nlh,
1759 				  const struct nlattr * const cda[],
1760 				  struct netlink_ext_ack *extack)
1761 {
1762 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1763 		struct netlink_dump_control c = {
1764 			.dump = ctnetlink_dump_dying,
1765 			.done = ctnetlink_done_list,
1766 		};
1767 		return netlink_dump_start(ctnl, skb, nlh, &c);
1768 	}
1769 
1770 	return -EOPNOTSUPP;
1771 }
1772 
1773 static int
1774 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1775 {
1776 	return ctnetlink_dump_list(skb, cb, false);
1777 }
1778 
1779 static int ctnetlink_get_ct_unconfirmed(struct net *net, struct sock *ctnl,
1780 					struct sk_buff *skb,
1781 					const struct nlmsghdr *nlh,
1782 					const struct nlattr * const cda[],
1783 					struct netlink_ext_ack *extack)
1784 {
1785 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1786 		struct netlink_dump_control c = {
1787 			.dump = ctnetlink_dump_unconfirmed,
1788 			.done = ctnetlink_done_list,
1789 		};
1790 		return netlink_dump_start(ctnl, skb, nlh, &c);
1791 	}
1792 
1793 	return -EOPNOTSUPP;
1794 }
1795 
1796 #if IS_ENABLED(CONFIG_NF_NAT)
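/* Hand the CTA_NAT_SRC/DST attribute to the NAT core. If the NAT hook (or
 * the per-family NAT module) is not loaded yet, drop the locks, try to
 * autoload it and return -EAGAIN so the request can be retried.
 */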
1797 static int
1798 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1799 			  enum nf_nat_manip_type manip,
1800 			  const struct nlattr *attr)
1801 	__must_hold(RCU)
1802 {
1803 	struct nf_nat_hook *nat_hook;
1804 	int err;
1805 
1806 	nat_hook = rcu_dereference(nf_nat_hook);
1807 	if (!nat_hook) {
1808 #ifdef CONFIG_MODULES
1809 		rcu_read_unlock();
1810 		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1811 		if (request_module("nf-nat") < 0) {
1812 			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1813 			rcu_read_lock();
1814 			return -EOPNOTSUPP;
1815 		}
1816 		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1817 		rcu_read_lock();
1818 		nat_hook = rcu_dereference(nf_nat_hook);
1819 		if (nat_hook)
1820 			return -EAGAIN;
1821 #endif
1822 		return -EOPNOTSUPP;
1823 	}
1824 
1825 	err = nat_hook->parse_nat_setup(ct, manip, attr);
1826 	if (err == -EAGAIN) {
1827 #ifdef CONFIG_MODULES
1828 		rcu_read_unlock();
1829 		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1830 		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1831 			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1832 			rcu_read_lock();
1833 			return -EOPNOTSUPP;
1834 		}
1835 		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1836 		rcu_read_lock();
1837 #else
1838 		err = -EOPNOTSUPP;
1839 #endif
1840 	}
1841 	return err;
1842 }
1843 #endif
1844 
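/* Set and clear the requested status bits one by one, silently ignoring
 * the bits in IPS_UNCHANGEABLE_MASK.
 */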
1845 static void
1846 __ctnetlink_change_status(struct nf_conn *ct, unsigned long on,
1847 			  unsigned long off)
1848 {
1849 	unsigned int bit;
1850 
	/* Ignore these unchangeable bits */
1852 	on &= ~IPS_UNCHANGEABLE_MASK;
1853 	off &= ~IPS_UNCHANGEABLE_MASK;
1854 
1855 	for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
1856 		if (on & (1 << bit))
1857 			set_bit(bit, &ct->status);
1858 		else if (off & (1 << bit))
1859 			clear_bit(bit, &ct->status);
1860 	}
1861 }
1862 
1863 static int
1864 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1865 {
1866 	unsigned long d;
1867 	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1868 	d = ct->status ^ status;
1869 
1870 	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1871 		/* unchangeable */
1872 		return -EBUSY;
1873 
1874 	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1875 		/* SEEN_REPLY bit can only be set */
1876 		return -EBUSY;
1877 
1878 	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1879 		/* ASSURED bit can only be set */
1880 		return -EBUSY;
1881 
1882 	__ctnetlink_change_status(ct, status, 0);
1883 	return 0;
1884 }
1885 
1886 static int
1887 ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1888 {
1889 #if IS_ENABLED(CONFIG_NF_NAT)
1890 	int ret;
1891 
1892 	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1893 		return 0;
1894 
1895 	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1896 					cda[CTA_NAT_DST]);
1897 	if (ret < 0)
1898 		return ret;
1899 
1900 	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
1901 					 cda[CTA_NAT_SRC]);
1902 #else
1903 	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1904 		return 0;
1905 	return -EOPNOTSUPP;
1906 #endif
1907 }
1908 
1909 static int ctnetlink_change_helper(struct nf_conn *ct,
1910 				   const struct nlattr * const cda[])
1911 {
1912 	struct nf_conntrack_helper *helper;
1913 	struct nf_conn_help *help = nfct_help(ct);
1914 	char *helpname = NULL;
1915 	struct nlattr *helpinfo = NULL;
1916 	int err;
1917 
1918 	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1919 	if (err < 0)
1920 		return err;
1921 
1922 	/* don't change helper of sibling connections */
1923 	if (ct->master) {
1924 		/* If we try to change the helper to the same thing twice,
1925 		 * treat the second attempt as a no-op instead of returning
1926 		 * an error.
1927 		 */
1928 		err = -EBUSY;
1929 		if (help) {
1930 			rcu_read_lock();
1931 			helper = rcu_dereference(help->helper);
1932 			if (helper && !strcmp(helper->name, helpname))
1933 				err = 0;
1934 			rcu_read_unlock();
1935 		}
1936 
1937 		return err;
1938 	}
1939 
1940 	if (!strcmp(helpname, "")) {
1941 		if (help && help->helper) {
1942 			/* we had a helper before ... */
1943 			nf_ct_remove_expectations(ct);
1944 			RCU_INIT_POINTER(help->helper, NULL);
1945 		}
1946 
1947 		return 0;
1948 	}
1949 
1950 	rcu_read_lock();
1951 	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1952 					    nf_ct_protonum(ct));
1953 	if (helper == NULL) {
1954 		rcu_read_unlock();
1955 		return -EOPNOTSUPP;
1956 	}
1957 
1958 	if (help) {
1959 		if (help->helper == helper) {
1960 			/* update private helper data if allowed. */
1961 			if (helper->from_nlattr)
1962 				helper->from_nlattr(helpinfo, ct);
1963 			err = 0;
1964 		} else
1965 			err = -EBUSY;
1966 	} else {
1967 		/* we cannot set a helper for an existing conntrack */
1968 		err = -EOPNOTSUPP;
1969 	}
1970 
1971 	rcu_read_unlock();
1972 	return err;
1973 }
1974 
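/* CTA_TIMEOUT is given in seconds; clamp it to INT_MAX jiffies and fail
 * with -ETIME if the entry is already dying.
 */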
1975 static int ctnetlink_change_timeout(struct nf_conn *ct,
1976 				    const struct nlattr * const cda[])
1977 {
1978 	u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1979 
1980 	if (timeout > INT_MAX)
1981 		timeout = INT_MAX;
1982 	ct->timeout = nfct_time_stamp + (u32)timeout;
1983 
1984 	if (test_bit(IPS_DYING_BIT, &ct->status))
1985 		return -ETIME;
1986 
1987 	return 0;
1988 }
1989 
1990 #if defined(CONFIG_NF_CONNTRACK_MARK)
1991 static void ctnetlink_change_mark(struct nf_conn *ct,
1992 				    const struct nlattr * const cda[])
1993 {
1994 	u32 mark, newmark, mask = 0;
1995 
1996 	if (cda[CTA_MARK_MASK])
1997 		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1998 
1999 	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2000 	newmark = (ct->mark & mask) ^ mark;
2001 	if (newmark != ct->mark)
2002 		ct->mark = newmark;
2003 }
2004 #endif
2005 
2006 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
2007 	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
2008 	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
2009 	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
2010 };
2011 
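/* Parse the nested CTA_PROTOINFO attribute and let the l4 protocol
 * tracker (TCP, DCCP or SCTP) update its private state via its
 * ->from_nlattr callback, if it implements one.
 */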
2012 static int ctnetlink_change_protoinfo(struct nf_conn *ct,
2013 				      const struct nlattr * const cda[])
2014 {
2015 	const struct nlattr *attr = cda[CTA_PROTOINFO];
2016 	const struct nf_conntrack_l4proto *l4proto;
2017 	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
2018 	int err = 0;
2019 
2020 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
2021 					  protoinfo_policy, NULL);
2022 	if (err < 0)
2023 		return err;
2024 
2025 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
2026 	if (l4proto->from_nlattr)
2027 		err = l4proto->from_nlattr(tb, ct);
2028 
2029 	return err;
2030 }
2031 
2032 static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
2033 	[CTA_SEQADJ_CORRECTION_POS]	= { .type = NLA_U32 },
2034 	[CTA_SEQADJ_OFFSET_BEFORE]	= { .type = NLA_U32 },
2035 	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
2036 };
2037 
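/* Fill one direction of the sequence adjustment state from a nested
 * CTA_SEQADJ_* attribute; the correction position and both offsets are
 * all mandatory.
 */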
2038 static int change_seq_adj(struct nf_ct_seqadj *seq,
2039 			  const struct nlattr * const attr)
2040 {
2041 	int err;
2042 	struct nlattr *cda[CTA_SEQADJ_MAX+1];
2043 
2044 	err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
2045 					  seqadj_policy, NULL);
2046 	if (err < 0)
2047 		return err;
2048 
2049 	if (!cda[CTA_SEQADJ_CORRECTION_POS])
2050 		return -EINVAL;
2051 
2052 	seq->correction_pos =
2053 		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
2054 
2055 	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
2056 		return -EINVAL;
2057 
2058 	seq->offset_before =
2059 		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
2060 
2061 	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
2062 		return -EINVAL;
2063 
2064 	seq->offset_after =
2065 		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
2066 
2067 	return 0;
2068 }
2069 
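/* Apply userspace-supplied sequence adjustments for the original and/or
 * reply direction under ct->lock and flag the conntrack with
 * IPS_SEQ_ADJUST.
 */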
2070 static int
2071 ctnetlink_change_seq_adj(struct nf_conn *ct,
2072 			 const struct nlattr * const cda[])
2073 {
2074 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
2075 	int ret = 0;
2076 
2077 	if (!seqadj)
2078 		return 0;
2079 
2080 	spin_lock_bh(&ct->lock);
2081 	if (cda[CTA_SEQ_ADJ_ORIG]) {
2082 		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
2083 				     cda[CTA_SEQ_ADJ_ORIG]);
2084 		if (ret < 0)
2085 			goto err;
2086 
2087 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
2088 	}
2089 
2090 	if (cda[CTA_SEQ_ADJ_REPLY]) {
2091 		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
2092 				     cda[CTA_SEQ_ADJ_REPLY]);
2093 		if (ret < 0)
2094 			goto err;
2095 
2096 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
2097 	}
2098 
2099 	spin_unlock_bh(&ct->lock);
2100 	return 0;
2101 err:
2102 	spin_unlock_bh(&ct->lock);
2103 	return ret;
2104 }
2105 
2106 static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
2107 	[CTA_SYNPROXY_ISN]	= { .type = NLA_U32 },
2108 	[CTA_SYNPROXY_ITS]	= { .type = NLA_U32 },
2109 	[CTA_SYNPROXY_TSOFF]	= { .type = NLA_U32 },
2110 };
2111 
2112 static int ctnetlink_change_synproxy(struct nf_conn *ct,
2113 				     const struct nlattr * const cda[])
2114 {
2115 	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
2116 	struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
2117 	int err;
2118 
2119 	if (!synproxy)
2120 		return 0;
2121 
2122 	err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
2123 					  cda[CTA_SYNPROXY], synproxy_policy,
2124 					  NULL);
2125 	if (err < 0)
2126 		return err;
2127 
2128 	if (!tb[CTA_SYNPROXY_ISN] ||
2129 	    !tb[CTA_SYNPROXY_ITS] ||
2130 	    !tb[CTA_SYNPROXY_TSOFF])
2131 		return -EINVAL;
2132 
2133 	synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
2134 	synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
2135 	synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
2136 
2137 	return 0;
2138 }
2139 
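/* Replace the connection labels from CTA_LABELS.  The label blob must be
 * a multiple of 32 bits long; an optional CTA_LABELS_MASK of the same
 * length restricts which bits are changed.
 */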
2140 static int
2141 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
2142 {
2143 #ifdef CONFIG_NF_CONNTRACK_LABELS
2144 	size_t len = nla_len(cda[CTA_LABELS]);
2145 	const void *mask = cda[CTA_LABELS_MASK];
2146 
2147 	if (len & (sizeof(u32)-1)) /* must be a multiple of u32 */
2148 		return -EINVAL;
2149 
2150 	if (mask) {
2151 		if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
2152 		    nla_len(cda[CTA_LABELS_MASK]) != len)
2153 			return -EINVAL;
2154 		mask = nla_data(cda[CTA_LABELS_MASK]);
2155 	}
2156 
2157 	len /= sizeof(u32);
2158 
2159 	return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
2160 #else
2161 	return -EOPNOTSUPP;
2162 #endif
2163 }
2164 
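/* Apply a ctnetlink update to an existing conntrack entry.  NAT setup and
 * master assignment are only valid at creation time, so their presence is
 * rejected here; everything else is handled attribute by attribute.
 */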
2165 static int
2166 ctnetlink_change_conntrack(struct nf_conn *ct,
2167 			   const struct nlattr * const cda[])
2168 {
2169 	int err;
2170 
2171 	/* only allow NAT changes and master assignment for new conntracks */
2172 	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
2173 		return -EOPNOTSUPP;
2174 
2175 	if (cda[CTA_HELP]) {
2176 		err = ctnetlink_change_helper(ct, cda);
2177 		if (err < 0)
2178 			return err;
2179 	}
2180 
2181 	if (cda[CTA_TIMEOUT]) {
2182 		err = ctnetlink_change_timeout(ct, cda);
2183 		if (err < 0)
2184 			return err;
2185 	}
2186 
2187 	if (cda[CTA_STATUS]) {
2188 		err = ctnetlink_change_status(ct, cda);
2189 		if (err < 0)
2190 			return err;
2191 	}
2192 
2193 	if (cda[CTA_PROTOINFO]) {
2194 		err = ctnetlink_change_protoinfo(ct, cda);
2195 		if (err < 0)
2196 			return err;
2197 	}
2198 
2199 #if defined(CONFIG_NF_CONNTRACK_MARK)
2200 	if (cda[CTA_MARK])
2201 		ctnetlink_change_mark(ct, cda);
2202 #endif
2203 
2204 	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2205 		err = ctnetlink_change_seq_adj(ct, cda);
2206 		if (err < 0)
2207 			return err;
2208 	}
2209 
2210 	if (cda[CTA_SYNPROXY]) {
2211 		err = ctnetlink_change_synproxy(ct, cda);
2212 		if (err < 0)
2213 			return err;
2214 	}
2215 
2216 	if (cda[CTA_LABELS]) {
2217 		err = ctnetlink_attach_labels(ct, cda);
2218 		if (err < 0)
2219 			return err;
2220 	}
2221 
2222 	return 0;
2223 }
2224 
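/* Create a conntrack entry purely from netlink attributes: allocate it,
 * attach the requested (or implicitly assigned) helper, possibly after a
 * module autoload round trip signalled with -EAGAIN, add the usual
 * extensions before the entry is marked confirmed, link it to a master
 * conntrack if this is an expectation, and insert it into the hash table.
 */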
2225 static struct nf_conn *
2226 ctnetlink_create_conntrack(struct net *net,
2227 			   const struct nf_conntrack_zone *zone,
2228 			   const struct nlattr * const cda[],
2229 			   struct nf_conntrack_tuple *otuple,
2230 			   struct nf_conntrack_tuple *rtuple,
2231 			   u8 u3)
2232 {
2233 	struct nf_conn *ct;
2234 	int err = -EINVAL;
2235 	struct nf_conntrack_helper *helper;
2236 	struct nf_conn_tstamp *tstamp;
2237 	u64 timeout;
2238 
2239 	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
2240 	if (IS_ERR(ct))
2241 		return ERR_PTR(-ENOMEM);
2242 
2243 	if (!cda[CTA_TIMEOUT])
2244 		goto err1;
2245 
2246 	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
2247 	if (timeout > INT_MAX)
2248 		timeout = INT_MAX;
2249 	ct->timeout = (u32)timeout + nfct_time_stamp;
2250 
2251 	rcu_read_lock();
2252 	if (cda[CTA_HELP]) {
2253 		char *helpname = NULL;
2254 		struct nlattr *helpinfo = NULL;
2255 
2256 		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
2257 		if (err < 0)
2258 			goto err2;
2259 
2260 		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2261 						    nf_ct_protonum(ct));
2262 		if (helper == NULL) {
2263 			rcu_read_unlock();
2264 #ifdef CONFIG_MODULES
2265 			if (request_module("nfct-helper-%s", helpname) < 0) {
2266 				err = -EOPNOTSUPP;
2267 				goto err1;
2268 			}
2269 
2270 			rcu_read_lock();
2271 			helper = __nf_conntrack_helper_find(helpname,
2272 							    nf_ct_l3num(ct),
2273 							    nf_ct_protonum(ct));
2274 			if (helper) {
2275 				err = -EAGAIN;
2276 				goto err2;
2277 			}
2278 			rcu_read_unlock();
2279 #endif
2280 			err = -EOPNOTSUPP;
2281 			goto err1;
2282 		} else {
2283 			struct nf_conn_help *help;
2284 
2285 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
2286 			if (help == NULL) {
2287 				err = -ENOMEM;
2288 				goto err2;
2289 			}
2290 			/* set private helper data if allowed. */
2291 			if (helper->from_nlattr)
2292 				helper->from_nlattr(helpinfo, ct);
2293 
2294 			/* not in the hash table yet, so an RCU publish is not strictly necessary */
2295 			RCU_INIT_POINTER(help->helper, helper);
2296 		}
2297 	} else {
2298 		/* try an implicit helper assignment */
2299 		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
2300 		if (err < 0)
2301 			goto err2;
2302 	}
2303 
2304 	err = ctnetlink_setup_nat(ct, cda);
2305 	if (err < 0)
2306 		goto err2;
2307 
2308 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
2309 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
2310 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
2311 	nf_ct_labels_ext_add(ct);
2312 	nfct_seqadj_ext_add(ct);
2313 	nfct_synproxy_ext_add(ct);
2314 
2315 	/* we must add conntrack extensions before confirmation. */
2316 	ct->status |= IPS_CONFIRMED;
2317 
2318 	if (cda[CTA_STATUS]) {
2319 		err = ctnetlink_change_status(ct, cda);
2320 		if (err < 0)
2321 			goto err2;
2322 	}
2323 
2324 	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2325 		err = ctnetlink_change_seq_adj(ct, cda);
2326 		if (err < 0)
2327 			goto err2;
2328 	}
2329 
2330 	memset(&ct->proto, 0, sizeof(ct->proto));
2331 	if (cda[CTA_PROTOINFO]) {
2332 		err = ctnetlink_change_protoinfo(ct, cda);
2333 		if (err < 0)
2334 			goto err2;
2335 	}
2336 
2337 	if (cda[CTA_SYNPROXY]) {
2338 		err = ctnetlink_change_synproxy(ct, cda);
2339 		if (err < 0)
2340 			goto err2;
2341 	}
2342 
2343 #if defined(CONFIG_NF_CONNTRACK_MARK)
2344 	if (cda[CTA_MARK])
2345 		ctnetlink_change_mark(ct, cda);
2346 #endif
2347 
2348 	/* set up master conntrack: this is a confirmed expectation */
2349 	if (cda[CTA_TUPLE_MASTER]) {
2350 		struct nf_conntrack_tuple master;
2351 		struct nf_conntrack_tuple_hash *master_h;
2352 		struct nf_conn *master_ct;
2353 
2354 		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
2355 					    u3, NULL);
2356 		if (err < 0)
2357 			goto err2;
2358 
2359 		master_h = nf_conntrack_find_get(net, zone, &master);
2360 		if (master_h == NULL) {
2361 			err = -ENOENT;
2362 			goto err2;
2363 		}
2364 		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
2365 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
2366 		ct->master = master_ct;
2367 	}
2368 	tstamp = nf_conn_tstamp_find(ct);
2369 	if (tstamp)
2370 		tstamp->start = ktime_get_real_ns();
2371 
2372 	err = nf_conntrack_hash_check_insert(ct);
2373 	if (err < 0)
2374 		goto err2;
2375 
2376 	rcu_read_unlock();
2377 
2378 	return ct;
2379 
2380 err2:
2381 	rcu_read_unlock();
2382 err1:
2383 	nf_conntrack_free(ct);
2384 	return ERR_PTR(err);
2385 }
2386 
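/* IPCTNL_MSG_CT_NEW handler: look the entry up by the original or reply
 * tuple.  If it does not exist and NLM_F_CREATE is set, create it and
 * report the new/related event; if it exists and NLM_F_EXCL is not set,
 * update it in place, otherwise return -EEXIST.
 */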
2387 static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
2388 				   struct sk_buff *skb,
2389 				   const struct nlmsghdr *nlh,
2390 				   const struct nlattr * const cda[],
2391 				   struct netlink_ext_ack *extack)
2392 {
2393 	struct nf_conntrack_tuple otuple, rtuple;
2394 	struct nf_conntrack_tuple_hash *h = NULL;
2395 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2396 	struct nf_conn *ct;
2397 	u_int8_t u3 = nfmsg->nfgen_family;
2398 	struct nf_conntrack_zone zone;
2399 	int err;
2400 
2401 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
2402 	if (err < 0)
2403 		return err;
2404 
2405 	if (cda[CTA_TUPLE_ORIG]) {
2406 		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
2407 					    u3, &zone);
2408 		if (err < 0)
2409 			return err;
2410 	}
2411 
2412 	if (cda[CTA_TUPLE_REPLY]) {
2413 		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
2414 					    u3, &zone);
2415 		if (err < 0)
2416 			return err;
2417 	}
2418 
2419 	if (cda[CTA_TUPLE_ORIG])
2420 		h = nf_conntrack_find_get(net, &zone, &otuple);
2421 	else if (cda[CTA_TUPLE_REPLY])
2422 		h = nf_conntrack_find_get(net, &zone, &rtuple);
2423 
2424 	if (h == NULL) {
2425 		err = -ENOENT;
2426 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
2427 			enum ip_conntrack_events events;
2428 
2429 			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
2430 				return -EINVAL;
2431 			if (otuple.dst.protonum != rtuple.dst.protonum)
2432 				return -EINVAL;
2433 
2434 			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
2435 							&rtuple, u3);
2436 			if (IS_ERR(ct))
2437 				return PTR_ERR(ct);
2438 
2439 			err = 0;
2440 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
2441 				events = 1 << IPCT_RELATED;
2442 			else
2443 				events = 1 << IPCT_NEW;
2444 
2445 			if (cda[CTA_LABELS] &&
2446 			    ctnetlink_attach_labels(ct, cda) == 0)
2447 				events |= (1 << IPCT_LABEL);
2448 
2449 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2450 						      (1 << IPCT_ASSURED) |
2451 						      (1 << IPCT_HELPER) |
2452 						      (1 << IPCT_PROTOINFO) |
2453 						      (1 << IPCT_SEQADJ) |
2454 						      (1 << IPCT_MARK) |
2455 						      (1 << IPCT_SYNPROXY) |
2456 						      events,
2457 						      ct, NETLINK_CB(skb).portid,
2458 						      nlmsg_report(nlh));
2459 			nf_ct_put(ct);
2460 		}
2461 
2462 		return err;
2463 	}
2464 	/* implicit 'else' */
2465 
2466 	err = -EEXIST;
2467 	ct = nf_ct_tuplehash_to_ctrack(h);
2468 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
2469 		err = ctnetlink_change_conntrack(ct, cda);
2470 		if (err == 0) {
2471 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2472 						      (1 << IPCT_ASSURED) |
2473 						      (1 << IPCT_HELPER) |
2474 						      (1 << IPCT_LABEL) |
2475 						      (1 << IPCT_PROTOINFO) |
2476 						      (1 << IPCT_SEQADJ) |
2477 						      (1 << IPCT_MARK) |
2478 						      (1 << IPCT_SYNPROXY),
2479 						      ct, NETLINK_CB(skb).portid,
2480 						      nlmsg_report(nlh));
2481 		}
2482 	}
2483 
2484 	nf_ct_put(ct);
2485 	return err;
2486 }
2487 
2488 static int
2489 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2490 				__u16 cpu, const struct ip_conntrack_stat *st)
2491 {
2492 	struct nlmsghdr *nlh;
2493 	struct nfgenmsg *nfmsg;
2494 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2495 
2496 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
2497 			      IPCTNL_MSG_CT_GET_STATS_CPU);
2498 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2499 	if (nlh == NULL)
2500 		goto nlmsg_failure;
2501 
2502 	nfmsg = nlmsg_data(nlh);
2503 	nfmsg->nfgen_family = AF_UNSPEC;
2504 	nfmsg->version      = NFNETLINK_V0;
2505 	nfmsg->res_id	    = htons(cpu);
2506 
2507 	if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
2508 	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
2509 	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
2510 	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
2511 				htonl(st->insert_failed)) ||
2512 	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
2513 	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
2514 	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
2515 	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
2516 				htonl(st->search_restart)) ||
2517 	    nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE,
2518 				htonl(st->clash_resolve)))
2519 		goto nla_put_failure;
2520 
2521 	nlmsg_end(skb, nlh);
2522 	return skb->len;
2523 
2524 nla_put_failure:
2525 nlmsg_failure:
2526 	nlmsg_cancel(skb, nlh);
2527 	return -1;
2528 }
2529 
2530 static int
2531 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2532 {
2533 	int cpu;
2534 	struct net *net = sock_net(skb->sk);
2535 
2536 	if (cb->args[0] == nr_cpu_ids)
2537 		return 0;
2538 
2539 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2540 		const struct ip_conntrack_stat *st;
2541 
2542 		if (!cpu_possible(cpu))
2543 			continue;
2544 
2545 		st = per_cpu_ptr(net->ct.stat, cpu);
2546 		if (ctnetlink_ct_stat_cpu_fill_info(skb,
2547 						    NETLINK_CB(cb->skb).portid,
2548 						    cb->nlh->nlmsg_seq,
2549 						    cpu, st) < 0)
2550 			break;
2551 	}
2552 	cb->args[0] = cpu;
2553 
2554 	return skb->len;
2555 }
2556 
2557 static int ctnetlink_stat_ct_cpu(struct net *net, struct sock *ctnl,
2558 				 struct sk_buff *skb,
2559 				 const struct nlmsghdr *nlh,
2560 				 const struct nlattr * const cda[],
2561 				 struct netlink_ext_ack *extack)
2562 {
2563 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
2564 		struct netlink_dump_control c = {
2565 			.dump = ctnetlink_ct_stat_cpu_dump,
2566 		};
2567 		return netlink_dump_start(ctnl, skb, nlh, &c);
2568 	}
2569 
2570 	return 0;
2571 }
2572 
2573 static int
2574 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2575 			    struct net *net)
2576 {
2577 	struct nlmsghdr *nlh;
2578 	struct nfgenmsg *nfmsg;
2579 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2580 	unsigned int nr_conntracks = atomic_read(&net->ct.count);
2581 
2582 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
2583 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2584 	if (nlh == NULL)
2585 		goto nlmsg_failure;
2586 
2587 	nfmsg = nlmsg_data(nlh);
2588 	nfmsg->nfgen_family = AF_UNSPEC;
2589 	nfmsg->version      = NFNETLINK_V0;
2590 	nfmsg->res_id	    = 0;
2591 
2592 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2593 		goto nla_put_failure;
2594 
2595 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2596 		goto nla_put_failure;
2597 
2598 	nlmsg_end(skb, nlh);
2599 	return skb->len;
2600 
2601 nla_put_failure:
2602 nlmsg_failure:
2603 	nlmsg_cancel(skb, nlh);
2604 	return -1;
2605 }
2606 
2607 static int ctnetlink_stat_ct(struct net *net, struct sock *ctnl,
2608 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
2609 			     const struct nlattr * const cda[],
2610 			     struct netlink_ext_ack *extack)
2611 {
2612 	struct sk_buff *skb2;
2613 	int err;
2614 
2615 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2616 	if (skb2 == NULL)
2617 		return -ENOMEM;
2618 
2619 	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
2620 					  nlh->nlmsg_seq,
2621 					  NFNL_MSG_TYPE(nlh->nlmsg_type),
2622 					  sock_net(skb->sk));
2623 	if (err <= 0)
2624 		goto free;
2625 
2626 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2627 	if (err < 0)
2628 		goto out;
2629 
2630 	return 0;
2631 
2632 free:
2633 	kfree_skb(skb2);
2634 out:
2635 	/* this avoids a loop in nfnetlink. */
2636 	return err == -EAGAIN ? -ENOBUFS : err;
2637 }
2638 
2639 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2640 	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
2641 	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
2642 	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
2643 	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
2644 	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
2645 	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING,
2646 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
2647 	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
2648 	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
2649 	[CTA_EXPECT_CLASS]	= { .type = NLA_U32 },
2650 	[CTA_EXPECT_NAT]	= { .type = NLA_NESTED },
2651 	[CTA_EXPECT_FN]		= { .type = NLA_NUL_STRING },
2652 };
2653 
2654 static struct nf_conntrack_expect *
2655 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2656 		       struct nf_conntrack_helper *helper,
2657 		       struct nf_conntrack_tuple *tuple,
2658 		       struct nf_conntrack_tuple *mask);
2659 
2660 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
2661 static size_t
2662 ctnetlink_glue_build_size(const struct nf_conn *ct)
2663 {
2664 	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2665 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2666 	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2667 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2668 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2669 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2670 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2671 	       + nla_total_size(0) /* CTA_PROTOINFO */
2672 	       + nla_total_size(0) /* CTA_HELP */
2673 	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2674 	       + ctnetlink_secctx_size(ct)
2675 #if IS_ENABLED(CONFIG_NF_NAT)
2676 	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2677 	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2678 #endif
2679 #ifdef CONFIG_NF_CONNTRACK_MARK
2680 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2681 #endif
2682 #ifdef CONFIG_NF_CONNTRACK_ZONES
2683 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2684 #endif
2685 	       + ctnetlink_proto_size(ct)
2686 	       ;
2687 }
2688 
2689 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
2690 {
2691 	const struct nf_conntrack_zone *zone;
2692 	struct nlattr *nest_parms;
2693 
2694 	zone = nf_ct_zone(ct);
2695 
2696 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
2697 	if (!nest_parms)
2698 		goto nla_put_failure;
2699 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2700 		goto nla_put_failure;
2701 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2702 				   NF_CT_ZONE_DIR_ORIG) < 0)
2703 		goto nla_put_failure;
2704 	nla_nest_end(skb, nest_parms);
2705 
2706 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
2707 	if (!nest_parms)
2708 		goto nla_put_failure;
2709 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2710 		goto nla_put_failure;
2711 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2712 				   NF_CT_ZONE_DIR_REPL) < 0)
2713 		goto nla_put_failure;
2714 	nla_nest_end(skb, nest_parms);
2715 
2716 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
2717 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
2718 		goto nla_put_failure;
2719 
2720 	if (ctnetlink_dump_id(skb, ct) < 0)
2721 		goto nla_put_failure;
2722 
2723 	if (ctnetlink_dump_status(skb, ct) < 0)
2724 		goto nla_put_failure;
2725 
2726 	if (ctnetlink_dump_timeout(skb, ct, false) < 0)
2727 		goto nla_put_failure;
2728 
2729 	if (ctnetlink_dump_protoinfo(skb, ct, false) < 0)
2730 		goto nla_put_failure;
2731 
2732 	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2733 		goto nla_put_failure;
2734 
2735 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2736 	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2737 		goto nla_put_failure;
2738 #endif
2739 	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2740 		goto nla_put_failure;
2741 
2742 	if ((ct->status & IPS_SEQ_ADJUST) &&
2743 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2744 		goto nla_put_failure;
2745 
2746 	if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
2747 		goto nla_put_failure;
2748 
2749 #ifdef CONFIG_NF_CONNTRACK_MARK
2750 	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2751 		goto nla_put_failure;
2752 #endif
2753 	if (ctnetlink_dump_labels(skb, ct) < 0)
2754 		goto nla_put_failure;
2755 	return 0;
2756 
2757 nla_put_failure:
2758 	return -ENOSPC;
2759 }
2760 
2761 static int
2762 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
2763 		     enum ip_conntrack_info ctinfo,
2764 		     u_int16_t ct_attr, u_int16_t ct_info_attr)
2765 {
2766 	struct nlattr *nest_parms;
2767 
2768 	nest_parms = nla_nest_start(skb, ct_attr);
2769 	if (!nest_parms)
2770 		goto nla_put_failure;
2771 
2772 	if (__ctnetlink_glue_build(skb, ct) < 0)
2773 		goto nla_put_failure;
2774 
2775 	nla_nest_end(skb, nest_parms);
2776 
2777 	if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
2778 		goto nla_put_failure;
2779 
2780 	return 0;
2781 
2782 nla_put_failure:
2783 	return -ENOSPC;
2784 }
2785 
2786 static int
2787 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
2788 {
2789 	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
2790 	unsigned long d = ct->status ^ status;
2791 
2792 	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
2793 		/* SEEN_REPLY bit can only be set */
2794 		return -EBUSY;
2795 
2796 	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
2797 		/* ASSURED bit can only be set */
2798 		return -EBUSY;
2799 
2800 	/* This check is less strict than ctnetlink_change_status()
2801 	 * because callers often flip IPS_EXPECTED bits when sending
2802 	 * an NFQA_CT attribute to the kernel.  So ignore the
2803 	 * unchangeable bits but do not error out.  User programs are
2804 	 * also allowed to clear any bit that they are allowed to set.
2805 	 */
2806 	__ctnetlink_change_status(ct, status, ~status);
2807 	return 0;
2808 }
2809 
2810 static int
2811 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2812 {
2813 	int err;
2814 
2815 	if (cda[CTA_TIMEOUT]) {
2816 		err = ctnetlink_change_timeout(ct, cda);
2817 		if (err < 0)
2818 			return err;
2819 	}
2820 	if (cda[CTA_STATUS]) {
2821 		err = ctnetlink_update_status(ct, cda);
2822 		if (err < 0)
2823 			return err;
2824 	}
2825 	if (cda[CTA_HELP]) {
2826 		err = ctnetlink_change_helper(ct, cda);
2827 		if (err < 0)
2828 			return err;
2829 	}
2830 	if (cda[CTA_LABELS]) {
2831 		err = ctnetlink_attach_labels(ct, cda);
2832 		if (err < 0)
2833 			return err;
2834 	}
2835 #if defined(CONFIG_NF_CONNTRACK_MARK)
2836 	if (cda[CTA_MARK]) {
2837 		ctnetlink_change_mark(ct, cda);
2838 	}
2839 #endif
2840 	return 0;
2841 }
2842 
2843 static int
2844 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
2845 {
2846 	struct nlattr *cda[CTA_MAX+1];
2847 	int ret;
2848 
2849 	ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy,
2850 					  NULL);
2851 	if (ret < 0)
2852 		return ret;
2853 
2854 	return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
2855 }
2856 
2857 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
2858 				    const struct nf_conn *ct,
2859 				    struct nf_conntrack_tuple *tuple,
2860 				    struct nf_conntrack_tuple *mask)
2861 {
2862 	int err;
2863 
2864 	err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2865 				    nf_ct_l3num(ct), NULL);
2866 	if (err < 0)
2867 		return err;
2868 
2869 	return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2870 				     nf_ct_l3num(ct), NULL);
2871 }
2872 
2873 static int
2874 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2875 			     u32 portid, u32 report)
2876 {
2877 	struct nlattr *cda[CTA_EXPECT_MAX+1];
2878 	struct nf_conntrack_tuple tuple, mask;
2879 	struct nf_conntrack_helper *helper = NULL;
2880 	struct nf_conntrack_expect *exp;
2881 	int err;
2882 
2883 	err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr,
2884 					  exp_nla_policy, NULL);
2885 	if (err < 0)
2886 		return err;
2887 
2888 	err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
2889 				       ct, &tuple, &mask);
2890 	if (err < 0)
2891 		return err;
2892 
2893 	if (cda[CTA_EXPECT_HELP_NAME]) {
2894 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2895 
2896 		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2897 						    nf_ct_protonum(ct));
2898 		if (helper == NULL)
2899 			return -EOPNOTSUPP;
2900 	}
2901 
2902 	exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2903 				     helper, &tuple, &mask);
2904 	if (IS_ERR(exp))
2905 		return PTR_ERR(exp);
2906 
2907 	err = nf_ct_expect_related_report(exp, portid, report, 0);
2908 	nf_ct_expect_put(exp);
2909 	return err;
2910 }
2911 
2912 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
2913 				  enum ip_conntrack_info ctinfo, int diff)
2914 {
2915 	if (!(ct->status & IPS_NAT_MASK))
2916 		return;
2917 
2918 	nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
2919 }
2920 
2921 static struct nfnl_ct_hook ctnetlink_glue_hook = {
2922 	.build_size	= ctnetlink_glue_build_size,
2923 	.build		= ctnetlink_glue_build,
2924 	.parse		= ctnetlink_glue_parse,
2925 	.attach_expect	= ctnetlink_glue_attach_expect,
2926 	.seq_adjust	= ctnetlink_glue_seqadj,
2927 };
2928 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
2929 
2930 /***********************************************************************
2931  * EXPECT
2932  ***********************************************************************/
2933 
2934 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2935 				    const struct nf_conntrack_tuple *tuple,
2936 				    u32 type)
2937 {
2938 	struct nlattr *nest_parms;
2939 
2940 	nest_parms = nla_nest_start(skb, type);
2941 	if (!nest_parms)
2942 		goto nla_put_failure;
2943 	if (ctnetlink_dump_tuples(skb, tuple) < 0)
2944 		goto nla_put_failure;
2945 	nla_nest_end(skb, nest_parms);
2946 
2947 	return 0;
2948 
2949 nla_put_failure:
2950 	return -1;
2951 }
2952 
2953 static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2954 				   const struct nf_conntrack_tuple *tuple,
2955 				   const struct nf_conntrack_tuple_mask *mask)
2956 {
2957 	const struct nf_conntrack_l4proto *l4proto;
2958 	struct nf_conntrack_tuple m;
2959 	struct nlattr *nest_parms;
2960 	int ret;
2961 
2962 	memset(&m, 0xFF, sizeof(m));
2963 	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2964 	m.src.u.all = mask->src.u.all;
2965 	m.dst.protonum = tuple->dst.protonum;
2966 
2967 	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
2968 	if (!nest_parms)
2969 		goto nla_put_failure;
2970 
2971 	rcu_read_lock();
2972 	ret = ctnetlink_dump_tuples_ip(skb, &m);
2973 	if (ret >= 0) {
2974 		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
2975 		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2976 	}
2977 	rcu_read_unlock();
2978 
2979 	if (unlikely(ret < 0))
2980 		goto nla_put_failure;
2981 
2982 	nla_nest_end(skb, nest_parms);
2983 
2984 	return 0;
2985 
2986 nla_put_failure:
2987 	return -1;
2988 }
2989 
2990 static const union nf_inet_addr any_addr;
2991 
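/* Compute an opaque expectation id for userspace by hashing the
 * expectation, helper and master pointers together with a digest of the
 * tuple under a once-initialized random siphash key, so the raw kernel
 * pointers are not exposed through CTA_EXPECT_ID.
 */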
2992 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
2993 {
2994 	static __read_mostly siphash_key_t exp_id_seed;
2995 	unsigned long a, b, c, d;
2996 
2997 	net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
2998 
2999 	a = (unsigned long)exp;
3000 	b = (unsigned long)exp->helper;
3001 	c = (unsigned long)exp->master;
3002 	d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
3003 
3004 #ifdef CONFIG_64BIT
3005 	return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
3006 #else
3007 	return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
3008 #endif
3009 }
3010 
3011 static int
3012 ctnetlink_exp_dump_expect(struct sk_buff *skb,
3013 			  const struct nf_conntrack_expect *exp)
3014 {
3015 	struct nf_conn *master = exp->master;
3016 	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
3017 	struct nf_conn_help *help;
3018 #if IS_ENABLED(CONFIG_NF_NAT)
3019 	struct nlattr *nest_parms;
3020 	struct nf_conntrack_tuple nat_tuple = {};
3021 #endif
3022 	struct nf_ct_helper_expectfn *expfn;
3023 
3024 	if (timeout < 0)
3025 		timeout = 0;
3026 
3027 	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
3028 		goto nla_put_failure;
3029 	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
3030 		goto nla_put_failure;
3031 	if (ctnetlink_exp_dump_tuple(skb,
3032 				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
3033 				 CTA_EXPECT_MASTER) < 0)
3034 		goto nla_put_failure;
3035 
3036 #if IS_ENABLED(CONFIG_NF_NAT)
3037 	if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
3038 	    exp->saved_proto.all) {
3039 		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
3040 		if (!nest_parms)
3041 			goto nla_put_failure;
3042 
3043 		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
3044 			goto nla_put_failure;
3045 
3046 		nat_tuple.src.l3num = nf_ct_l3num(master);
3047 		nat_tuple.src.u3 = exp->saved_addr;
3048 		nat_tuple.dst.protonum = nf_ct_protonum(master);
3049 		nat_tuple.src.u = exp->saved_proto;
3050 
3051 		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
3052 						CTA_EXPECT_NAT_TUPLE) < 0)
3053 			goto nla_put_failure;
3054 		nla_nest_end(skb, nest_parms);
3055 	}
3056 #endif
3057 	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
3058 	    nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
3059 	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
3060 	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
3061 		goto nla_put_failure;
3062 	help = nfct_help(master);
3063 	if (help) {
3064 		struct nf_conntrack_helper *helper;
3065 
3066 		helper = rcu_dereference(help->helper);
3067 		if (helper &&
3068 		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
3069 			goto nla_put_failure;
3070 	}
3071 	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
3072 	if (expfn != NULL &&
3073 	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
3074 		goto nla_put_failure;
3075 
3076 	return 0;
3077 
3078 nla_put_failure:
3079 	return -1;
3080 }
3081 
3082 static int
3083 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
3084 			int event, const struct nf_conntrack_expect *exp)
3085 {
3086 	struct nlmsghdr *nlh;
3087 	struct nfgenmsg *nfmsg;
3088 	unsigned int flags = portid ? NLM_F_MULTI : 0;
3089 
3090 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
3091 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3092 	if (nlh == NULL)
3093 		goto nlmsg_failure;
3094 
3095 	nfmsg = nlmsg_data(nlh);
3096 	nfmsg->nfgen_family = exp->tuple.src.l3num;
3097 	nfmsg->version	    = NFNETLINK_V0;
3098 	nfmsg->res_id	    = 0;
3099 
3100 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3101 		goto nla_put_failure;
3102 
3103 	nlmsg_end(skb, nlh);
3104 	return skb->len;
3105 
3106 nlmsg_failure:
3107 nla_put_failure:
3108 	nlmsg_cancel(skb, nlh);
3109 	return -1;
3110 }
3111 
3112 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3113 static int
3114 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
3115 {
3116 	struct nf_conntrack_expect *exp = item->exp;
3117 	struct net *net = nf_ct_exp_net(exp);
3118 	struct nlmsghdr *nlh;
3119 	struct nfgenmsg *nfmsg;
3120 	struct sk_buff *skb;
3121 	unsigned int type, group;
3122 	int flags = 0;
3123 
3124 	if (events & (1 << IPEXP_DESTROY)) {
3125 		type = IPCTNL_MSG_EXP_DELETE;
3126 		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
3127 	} else if (events & (1 << IPEXP_NEW)) {
3128 		type = IPCTNL_MSG_EXP_NEW;
3129 		flags = NLM_F_CREATE|NLM_F_EXCL;
3130 		group = NFNLGRP_CONNTRACK_EXP_NEW;
3131 	} else
3132 		return 0;
3133 
3134 	if (!item->report && !nfnetlink_has_listeners(net, group))
3135 		return 0;
3136 
3137 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3138 	if (skb == NULL)
3139 		goto errout;
3140 
3141 	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
3142 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
3143 	if (nlh == NULL)
3144 		goto nlmsg_failure;
3145 
3146 	nfmsg = nlmsg_data(nlh);
3147 	nfmsg->nfgen_family = exp->tuple.src.l3num;
3148 	nfmsg->version	    = NFNETLINK_V0;
3149 	nfmsg->res_id	    = 0;
3150 
3151 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3152 		goto nla_put_failure;
3153 
3154 	nlmsg_end(skb, nlh);
3155 	nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
3156 	return 0;
3157 
3158 nla_put_failure:
3159 	nlmsg_cancel(skb, nlh);
3160 nlmsg_failure:
3161 	kfree_skb(skb);
3162 errout:
3163 	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
3164 	return 0;
3165 }
3166 #endif
3167 static int ctnetlink_exp_done(struct netlink_callback *cb)
3168 {
3169 	if (cb->args[1])
3170 		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
3171 	return 0;
3172 }
3173 
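/* Dump all expectations in the global expectation hash.  cb->args[0]
 * remembers the current bucket and cb->args[1] holds a reference to the
 * expectation at which a previous, partially filled skb stopped.
 */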
3174 static int
3175 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3176 {
3177 	struct net *net = sock_net(skb->sk);
3178 	struct nf_conntrack_expect *exp, *last;
3179 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
3180 	u_int8_t l3proto = nfmsg->nfgen_family;
3181 
3182 	rcu_read_lock();
3183 	last = (struct nf_conntrack_expect *)cb->args[1];
3184 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
3185 restart:
3186 		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
3187 					 hnode) {
3188 			if (l3proto && exp->tuple.src.l3num != l3proto)
3189 				continue;
3190 
3191 			if (!net_eq(nf_ct_net(exp->master), net))
3192 				continue;
3193 
3194 			if (cb->args[1]) {
3195 				if (exp != last)
3196 					continue;
3197 				cb->args[1] = 0;
3198 			}
3199 			if (ctnetlink_exp_fill_info(skb,
3200 						    NETLINK_CB(cb->skb).portid,
3201 						    cb->nlh->nlmsg_seq,
3202 						    IPCTNL_MSG_EXP_NEW,
3203 						    exp) < 0) {
3204 				if (!refcount_inc_not_zero(&exp->use))
3205 					continue;
3206 				cb->args[1] = (unsigned long)exp;
3207 				goto out;
3208 			}
3209 		}
3210 		if (cb->args[1]) {
3211 			cb->args[1] = 0;
3212 			goto restart;
3213 		}
3214 	}
3215 out:
3216 	rcu_read_unlock();
3217 	if (last)
3218 		nf_ct_expect_put(last);
3219 
3220 	return skb->len;
3221 }
3222 
3223 static int
3224 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3225 {
3226 	struct nf_conntrack_expect *exp, *last;
3227 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
3228 	struct nf_conn *ct = cb->data;
3229 	struct nf_conn_help *help = nfct_help(ct);
3230 	u_int8_t l3proto = nfmsg->nfgen_family;
3231 
3232 	if (cb->args[0])
3233 		return 0;
3234 
3235 	rcu_read_lock();
3236 	last = (struct nf_conntrack_expect *)cb->args[1];
3237 restart:
3238 	hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
3239 		if (l3proto && exp->tuple.src.l3num != l3proto)
3240 			continue;
3241 		if (cb->args[1]) {
3242 			if (exp != last)
3243 				continue;
3244 			cb->args[1] = 0;
3245 		}
3246 		if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
3247 					    cb->nlh->nlmsg_seq,
3248 					    IPCTNL_MSG_EXP_NEW,
3249 					    exp) < 0) {
3250 			if (!refcount_inc_not_zero(&exp->use))
3251 				continue;
3252 			cb->args[1] = (unsigned long)exp;
3253 			goto out;
3254 		}
3255 	}
3256 	if (cb->args[1]) {
3257 		cb->args[1] = 0;
3258 		goto restart;
3259 	}
3260 	cb->args[0] = 1;
3261 out:
3262 	rcu_read_unlock();
3263 	if (last)
3264 		nf_ct_expect_put(last);
3265 
3266 	return skb->len;
3267 }
3268 
3269 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
3270 				 struct sk_buff *skb,
3271 				 const struct nlmsghdr *nlh,
3272 				 const struct nlattr * const cda[],
3273 				 struct netlink_ext_ack *extack)
3274 {
3275 	int err;
3276 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3277 	u_int8_t u3 = nfmsg->nfgen_family;
3278 	struct nf_conntrack_tuple tuple;
3279 	struct nf_conntrack_tuple_hash *h;
3280 	struct nf_conn *ct;
3281 	struct nf_conntrack_zone zone;
3282 	struct netlink_dump_control c = {
3283 		.dump = ctnetlink_exp_ct_dump_table,
3284 		.done = ctnetlink_exp_done,
3285 	};
3286 
3287 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3288 				    u3, NULL);
3289 	if (err < 0)
3290 		return err;
3291 
3292 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3293 	if (err < 0)
3294 		return err;
3295 
3296 	h = nf_conntrack_find_get(net, &zone, &tuple);
3297 	if (!h)
3298 		return -ENOENT;
3299 
3300 	ct = nf_ct_tuplehash_to_ctrack(h);
3301 	/* No expectations linked to this conntrack entry. */
3302 	if (!nfct_help(ct)) {
3303 		nf_ct_put(ct);
3304 		return 0;
3305 	}
3306 
3307 	c.data = ct;
3308 
3309 	err = netlink_dump_start(ctnl, skb, nlh, &c);
3310 	nf_ct_put(ct);
3311 
3312 	return err;
3313 }
3314 
3315 static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
3316 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3317 				const struct nlattr * const cda[],
3318 				struct netlink_ext_ack *extack)
3319 {
3320 	struct nf_conntrack_tuple tuple;
3321 	struct nf_conntrack_expect *exp;
3322 	struct sk_buff *skb2;
3323 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3324 	u_int8_t u3 = nfmsg->nfgen_family;
3325 	struct nf_conntrack_zone zone;
3326 	int err;
3327 
3328 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3329 		if (cda[CTA_EXPECT_MASTER])
3330 			return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda,
3331 						     extack);
3332 		else {
3333 			struct netlink_dump_control c = {
3334 				.dump = ctnetlink_exp_dump_table,
3335 				.done = ctnetlink_exp_done,
3336 			};
3337 			return netlink_dump_start(ctnl, skb, nlh, &c);
3338 		}
3339 	}
3340 
3341 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3342 	if (err < 0)
3343 		return err;
3344 
3345 	if (cda[CTA_EXPECT_TUPLE])
3346 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3347 					    u3, NULL);
3348 	else if (cda[CTA_EXPECT_MASTER])
3349 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3350 					    u3, NULL);
3351 	else
3352 		return -EINVAL;
3353 
3354 	if (err < 0)
3355 		return err;
3356 
3357 	exp = nf_ct_expect_find_get(net, &zone, &tuple);
3358 	if (!exp)
3359 		return -ENOENT;
3360 
3361 	if (cda[CTA_EXPECT_ID]) {
3362 		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3363 
3364 		if (id != nf_expect_get_id(exp)) {
3365 			nf_ct_expect_put(exp);
3366 			return -ENOENT;
3367 		}
3368 	}
3369 
3370 	err = -ENOMEM;
3371 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3372 	if (skb2 == NULL) {
3373 		nf_ct_expect_put(exp);
3374 		goto out;
3375 	}
3376 
3377 	rcu_read_lock();
3378 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
3379 				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
3380 	rcu_read_unlock();
3381 	nf_ct_expect_put(exp);
3382 	if (err <= 0)
3383 		goto free;
3384 
3385 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
3386 	if (err < 0)
3387 		goto out;
3388 
3389 	return 0;
3390 
3391 free:
3392 	kfree_skb(skb2);
3393 out:
3394 	/* this avoids a loop in nfnetlink. */
3395 	return err == -EAGAIN ? -ENOBUFS : err;
3396 }
3397 
3398 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
3399 {
3400 	const struct nf_conn_help *m_help;
3401 	const char *name = data;
3402 
3403 	m_help = nfct_help(exp->master);
3404 
3405 	return strcmp(m_help->helper->name, name) == 0;
3406 }
3407 
3408 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
3409 {
3410 	return true;
3411 }
3412 
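/* IPCTNL_MSG_EXP_DELETE handler: remove a single expectation identified
 * by CTA_EXPECT_TUPLE (and optionally CTA_EXPECT_ID), remove all
 * expectations created by a given helper, or flush the whole table when
 * neither attribute is present.
 */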
3413 static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
3414 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3415 				const struct nlattr * const cda[],
3416 				struct netlink_ext_ack *extack)
3417 {
3418 	struct nf_conntrack_expect *exp;
3419 	struct nf_conntrack_tuple tuple;
3420 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3421 	u_int8_t u3 = nfmsg->nfgen_family;
3422 	struct nf_conntrack_zone zone;
3423 	int err;
3424 
3425 	if (cda[CTA_EXPECT_TUPLE]) {
3426 		/* delete a single expect by tuple */
3427 		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3428 		if (err < 0)
3429 			return err;
3430 
3431 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3432 					    u3, NULL);
3433 		if (err < 0)
3434 			return err;
3435 
3436 		/* bump usage count to 2 */
3437 		exp = nf_ct_expect_find_get(net, &zone, &tuple);
3438 		if (!exp)
3439 			return -ENOENT;
3440 
3441 		if (cda[CTA_EXPECT_ID]) {
3442 			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3443 			if (ntohl(id) != (u32)(unsigned long)exp) {
3444 				nf_ct_expect_put(exp);
3445 				return -ENOENT;
3446 			}
3447 		}
3448 
3449 		/* after list removal, usage count == 1 */
3450 		spin_lock_bh(&nf_conntrack_expect_lock);
3451 		if (del_timer(&exp->timeout)) {
3452 			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
3453 						   nlmsg_report(nlh));
3454 			nf_ct_expect_put(exp);
3455 		}
3456 		spin_unlock_bh(&nf_conntrack_expect_lock);
3457 		/* have to put the reference we took above;
3458 		 * after this line the usage count == 0 */
3459 		nf_ct_expect_put(exp);
3460 	} else if (cda[CTA_EXPECT_HELP_NAME]) {
3461 		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3462 
3463 		nf_ct_expect_iterate_net(net, expect_iter_name, name,
3464 					 NETLINK_CB(skb).portid,
3465 					 nlmsg_report(nlh));
3466 	} else {
3467 		/* This basically means we have to flush everything */
3468 		nf_ct_expect_iterate_net(net, expect_iter_all, NULL,
3469 					 NETLINK_CB(skb).portid,
3470 					 nlmsg_report(nlh));
3471 	}
3472 
3473 	return 0;
3474 }
3475 static int
3476 ctnetlink_change_expect(struct nf_conntrack_expect *x,
3477 			const struct nlattr * const cda[])
3478 {
3479 	if (cda[CTA_EXPECT_TIMEOUT]) {
3480 		if (!del_timer(&x->timeout))
3481 			return -ETIME;
3482 
3483 		x->timeout.expires = jiffies +
3484 			ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
3485 		add_timer(&x->timeout);
3486 	}
3487 	return 0;
3488 }
3489 
3490 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
3491 	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
3492 	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
3493 };
3494 
3495 static int
3496 ctnetlink_parse_expect_nat(const struct nlattr *attr,
3497 			   struct nf_conntrack_expect *exp,
3498 			   u_int8_t u3)
3499 {
3500 #if IS_ENABLED(CONFIG_NF_NAT)
3501 	struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
3502 	struct nf_conntrack_tuple nat_tuple = {};
3503 	int err;
3504 
3505 	err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
3506 					  exp_nat_nla_policy, NULL);
3507 	if (err < 0)
3508 		return err;
3509 
3510 	if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
3511 		return -EINVAL;
3512 
3513 	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
3514 				    &nat_tuple, CTA_EXPECT_NAT_TUPLE,
3515 				    u3, NULL);
3516 	if (err < 0)
3517 		return err;
3518 
3519 	exp->saved_addr = nat_tuple.src.u3;
3520 	exp->saved_proto = nat_tuple.src.u;
3521 	exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
3522 
3523 	return 0;
3524 #else
3525 	return -EOPNOTSUPP;
3526 #endif
3527 }
3528 
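/* Build an expectation object from netlink attributes.  The master
 * conntrack must already carry a helper extension; class, flags, an
 * expectfn looked up by name and expectation NAT information are all
 * optional.
 */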
3529 static struct nf_conntrack_expect *
3530 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
3531 		       struct nf_conntrack_helper *helper,
3532 		       struct nf_conntrack_tuple *tuple,
3533 		       struct nf_conntrack_tuple *mask)
3534 {
3535 	u_int32_t class = 0;
3536 	struct nf_conntrack_expect *exp;
3537 	struct nf_conn_help *help;
3538 	int err;
3539 
3540 	help = nfct_help(ct);
3541 	if (!help)
3542 		return ERR_PTR(-EOPNOTSUPP);
3543 
3544 	if (cda[CTA_EXPECT_CLASS] && helper) {
3545 		class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
3546 		if (class > helper->expect_class_max)
3547 			return ERR_PTR(-EINVAL);
3548 	}
3549 	exp = nf_ct_expect_alloc(ct);
3550 	if (!exp)
3551 		return ERR_PTR(-ENOMEM);
3552 
3553 	if (cda[CTA_EXPECT_FLAGS]) {
3554 		exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
3555 		exp->flags &= ~NF_CT_EXPECT_USERSPACE;
3556 	} else {
3557 		exp->flags = 0;
3558 	}
3559 	if (cda[CTA_EXPECT_FN]) {
3560 		const char *name = nla_data(cda[CTA_EXPECT_FN]);
3561 		struct nf_ct_helper_expectfn *expfn;
3562 
3563 		expfn = nf_ct_helper_expectfn_find_by_name(name);
3564 		if (expfn == NULL) {
3565 			err = -EINVAL;
3566 			goto err_out;
3567 		}
3568 		exp->expectfn = expfn->expectfn;
3569 	} else
3570 		exp->expectfn = NULL;
3571 
3572 	exp->class = class;
3573 	exp->master = ct;
3574 	exp->helper = helper;
3575 	exp->tuple = *tuple;
3576 	exp->mask.src.u3 = mask->src.u3;
3577 	exp->mask.src.u.all = mask->src.u.all;
3578 
3579 	if (cda[CTA_EXPECT_NAT]) {
3580 		err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
3581 						 exp, nf_ct_l3num(ct));
3582 		if (err < 0)
3583 			goto err_out;
3584 	}
3585 	return exp;
3586 err_out:
3587 	nf_ct_expect_put(exp);
3588 	return ERR_PTR(err);
3589 }
3590 
3591 static int
3592 ctnetlink_create_expect(struct net *net,
3593 			const struct nf_conntrack_zone *zone,
3594 			const struct nlattr * const cda[],
3595 			u_int8_t u3, u32 portid, int report)
3596 {
3597 	struct nf_conntrack_tuple tuple, mask, master_tuple;
3598 	struct nf_conntrack_tuple_hash *h = NULL;
3599 	struct nf_conntrack_helper *helper = NULL;
3600 	struct nf_conntrack_expect *exp;
3601 	struct nf_conn *ct;
3602 	int err;
3603 
3604 	/* caller guarantees that those three CTA_EXPECT_* exist */
3605 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3606 				    u3, NULL);
3607 	if (err < 0)
3608 		return err;
3609 	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
3610 				    u3, NULL);
3611 	if (err < 0)
3612 		return err;
3613 	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
3614 				    u3, NULL);
3615 	if (err < 0)
3616 		return err;
3617 
3618 	/* Look for master conntrack of this expectation */
3619 	h = nf_conntrack_find_get(net, zone, &master_tuple);
3620 	if (!h)
3621 		return -ENOENT;
3622 	ct = nf_ct_tuplehash_to_ctrack(h);
3623 
3624 	rcu_read_lock();
3625 	if (cda[CTA_EXPECT_HELP_NAME]) {
3626 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3627 
3628 		helper = __nf_conntrack_helper_find(helpname, u3,
3629 						    nf_ct_protonum(ct));
3630 		if (helper == NULL) {
3631 			rcu_read_unlock();
3632 #ifdef CONFIG_MODULES
3633 			if (request_module("nfct-helper-%s", helpname) < 0) {
3634 				err = -EOPNOTSUPP;
3635 				goto err_ct;
3636 			}
3637 			rcu_read_lock();
3638 			helper = __nf_conntrack_helper_find(helpname, u3,
3639 							    nf_ct_protonum(ct));
3640 			if (helper) {
3641 				err = -EAGAIN;
3642 				goto err_rcu;
3643 			}
3644 			rcu_read_unlock();
3645 #endif
3646 			err = -EOPNOTSUPP;
3647 			goto err_ct;
3648 		}
3649 	}
3650 
3651 	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3652 	if (IS_ERR(exp)) {
3653 		err = PTR_ERR(exp);
3654 		goto err_rcu;
3655 	}
3656 
3657 	err = nf_ct_expect_related_report(exp, portid, report, 0);
3658 	nf_ct_expect_put(exp);
3659 err_rcu:
3660 	rcu_read_unlock();
3661 err_ct:
3662 	nf_ct_put(ct);
3663 	return err;
3664 }
3665 
3666 static int ctnetlink_new_expect(struct net *net, struct sock *ctnl,
3667 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3668 				const struct nlattr * const cda[],
3669 				struct netlink_ext_ack *extack)
3670 {
3671 	struct nf_conntrack_tuple tuple;
3672 	struct nf_conntrack_expect *exp;
3673 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3674 	u_int8_t u3 = nfmsg->nfgen_family;
3675 	struct nf_conntrack_zone zone;
3676 	int err;
3677 
3678 	if (!cda[CTA_EXPECT_TUPLE]
3679 	    || !cda[CTA_EXPECT_MASK]
3680 	    || !cda[CTA_EXPECT_MASTER])
3681 		return -EINVAL;
3682 
3683 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3684 	if (err < 0)
3685 		return err;
3686 
3687 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3688 				    u3, NULL);
3689 	if (err < 0)
3690 		return err;
3691 
3692 	spin_lock_bh(&nf_conntrack_expect_lock);
3693 	exp = __nf_ct_expect_find(net, &zone, &tuple);
3694 	if (!exp) {
3695 		spin_unlock_bh(&nf_conntrack_expect_lock);
3696 		err = -ENOENT;
3697 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
3698 			err = ctnetlink_create_expect(net, &zone, cda, u3,
3699 						      NETLINK_CB(skb).portid,
3700 						      nlmsg_report(nlh));
3701 		}
3702 		return err;
3703 	}
3704 
3705 	err = -EEXIST;
3706 	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
3707 		err = ctnetlink_change_expect(exp, cda);
3708 	spin_unlock_bh(&nf_conntrack_expect_lock);
3709 
3710 	return err;
3711 }
3712 
3713 static int
3714 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3715 			     const struct ip_conntrack_stat *st)
3716 {
3717 	struct nlmsghdr *nlh;
3718 	struct nfgenmsg *nfmsg;
3719 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3720 
3721 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
3722 			      IPCTNL_MSG_EXP_GET_STATS_CPU);
3723 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3724 	if (nlh == NULL)
3725 		goto nlmsg_failure;
3726 
3727 	nfmsg = nlmsg_data(nlh);
3728 	nfmsg->nfgen_family = AF_UNSPEC;
3729 	nfmsg->version      = NFNETLINK_V0;
3730 	nfmsg->res_id	    = htons(cpu);
3731 
3732 	if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3733 	    nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3734 	    nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3735 		goto nla_put_failure;
3736 
3737 	nlmsg_end(skb, nlh);
3738 	return skb->len;
3739 
3740 nla_put_failure:
3741 nlmsg_failure:
3742 	nlmsg_cancel(skb, nlh);
3743 	return -1;
3744 }
3745 
3746 static int
3747 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3748 {
3749 	int cpu;
3750 	struct net *net = sock_net(skb->sk);
3751 
3752 	if (cb->args[0] == nr_cpu_ids)
3753 		return 0;
3754 
3755 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3756 		const struct ip_conntrack_stat *st;
3757 
3758 		if (!cpu_possible(cpu))
3759 			continue;
3760 
3761 		st = per_cpu_ptr(net->ct.stat, cpu);
3762 		if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3763 						 cb->nlh->nlmsg_seq,
3764 						 cpu, st) < 0)
3765 			break;
3766 	}
3767 	cb->args[0] = cpu;
3768 
3769 	return skb->len;
3770 }
3771 
3772 static int ctnetlink_stat_exp_cpu(struct net *net, struct sock *ctnl,
3773 				  struct sk_buff *skb,
3774 				  const struct nlmsghdr *nlh,
3775 				  const struct nlattr * const cda[],
3776 				  struct netlink_ext_ack *extack)
3777 {
3778 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3779 		struct netlink_dump_control c = {
3780 			.dump = ctnetlink_exp_stat_cpu_dump,
3781 		};
3782 		return netlink_dump_start(ctnl, skb, nlh, &c);
3783 	}
3784 
3785 	return 0;
3786 }
3787 
3788 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3789 static struct nf_ct_event_notifier ctnl_notifier = {
3790 	.fcn = ctnetlink_conntrack_event,
3791 };
3792 
3793 static struct nf_exp_event_notifier ctnl_notifier_exp = {
3794 	.fcn = ctnetlink_expect_event,
3795 };
3796 #endif
3797 
3798 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3799 	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
3800 					    .attr_count = CTA_MAX,
3801 					    .policy = ct_nla_policy },
3802 	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
3803 					    .attr_count = CTA_MAX,
3804 					    .policy = ct_nla_policy },
3805 	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
3806 					    .attr_count = CTA_MAX,
3807 					    .policy = ct_nla_policy },
3808 	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
3809 					    .attr_count = CTA_MAX,
3810 					    .policy = ct_nla_policy },
3811 	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
3812 	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
3813 	[IPCTNL_MSG_CT_GET_DYING]	= { .call = ctnetlink_get_ct_dying },
3814 	[IPCTNL_MSG_CT_GET_UNCONFIRMED]	= { .call = ctnetlink_get_ct_unconfirmed },
3815 };
3816 
3817 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3818 	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
3819 					    .attr_count = CTA_EXPECT_MAX,
3820 					    .policy = exp_nla_policy },
3821 	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
3822 					    .attr_count = CTA_EXPECT_MAX,
3823 					    .policy = exp_nla_policy },
3824 	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
3825 					    .attr_count = CTA_EXPECT_MAX,
3826 					    .policy = exp_nla_policy },
3827 	[IPCTNL_MSG_EXP_GET_STATS_CPU]	= { .call = ctnetlink_stat_exp_cpu },
3828 };
3829 
3830 static const struct nfnetlink_subsystem ctnl_subsys = {
3831 	.name				= "conntrack",
3832 	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
3833 	.cb_count			= IPCTNL_MSG_MAX,
3834 	.cb				= ctnl_cb,
3835 };
3836 
3837 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3838 	.name				= "conntrack_expect",
3839 	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
3840 	.cb_count			= IPCTNL_MSG_EXP_MAX,
3841 	.cb				= ctnl_exp_cb,
3842 };
3843 
3844 MODULE_ALIAS("ip_conntrack_netlink");
3845 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3846 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3847 
3848 static int __net_init ctnetlink_net_init(struct net *net)
3849 {
3850 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3851 	int ret;
3852 
3853 	ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3854 	if (ret < 0) {
3855 		pr_err("ctnetlink_init: cannot register notifier.\n");
3856 		goto err_out;
3857 	}
3858 
3859 	ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3860 	if (ret < 0) {
3861 		pr_err("ctnetlink_init: cannot register expect notifier.\n");
3862 		goto err_unreg_notifier;
3863 	}
3864 #endif
3865 	return 0;
3866 
3867 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3868 err_unreg_notifier:
3869 	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3870 err_out:
3871 	return ret;
3872 #endif
3873 }
3874 
3875 static void ctnetlink_net_exit(struct net *net)
3876 {
3877 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3878 	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3879 	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3880 #endif
3881 }
3882 
3883 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3884 {
3885 	struct net *net;
3886 
3887 	list_for_each_entry(net, net_exit_list, exit_list)
3888 		ctnetlink_net_exit(net);
3889 
3890 	/* wait for other CPUs until they are done with ctnl_notifiers */
3891 	synchronize_rcu();
3892 }
3893 
3894 static struct pernet_operations ctnetlink_net_ops = {
3895 	.init		= ctnetlink_net_init,
3896 	.exit_batch	= ctnetlink_net_exit_batch,
3897 };
3898 
3899 static int __init ctnetlink_init(void)
3900 {
3901 	int ret;
3902 
3903 	ret = nfnetlink_subsys_register(&ctnl_subsys);
3904 	if (ret < 0) {
3905 		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3906 		goto err_out;
3907 	}
3908 
3909 	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3910 	if (ret < 0) {
3911 		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3912 		goto err_unreg_subsys;
3913 	}
3914 
3915 	ret = register_pernet_subsys(&ctnetlink_net_ops);
3916 	if (ret < 0) {
3917 		pr_err("ctnetlink_init: cannot register pernet operations\n");
3918 		goto err_unreg_exp_subsys;
3919 	}
3920 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3921 	/* set up interaction between nf_queue and nf_conntrack_netlink. */
3922 	RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
3923 #endif
3924 	return 0;
3925 
3926 err_unreg_exp_subsys:
3927 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3928 err_unreg_subsys:
3929 	nfnetlink_subsys_unregister(&ctnl_subsys);
3930 err_out:
3931 	return ret;
3932 }
3933 
3934 static void __exit ctnetlink_exit(void)
3935 {
3936 	unregister_pernet_subsys(&ctnetlink_net_ops);
3937 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3938 	nfnetlink_subsys_unregister(&ctnl_subsys);
3939 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3940 	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3941 #endif
3942 	synchronize_rcu();
3943 }
3944 
3945 module_init(ctnetlink_init);
3946 module_exit(ctnetlink_exit);
3947