1 /* Connection tracking via netlink socket. Allows for user space
2  * protocol helpers and general trouble making from userspace.
3  *
4  * (C) 2001 by Jay Schulist <jschlst@samba.org>
5  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6  * (C) 2003 by Patrick McHardy <kaber@trash.net>
7  * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8  *
9  * Initial connection tracking via netlink development funded and
10  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11  *
12  * Further development of this code funded by Astaro AG (http://www.astaro.com)
13  *
14  * This software may be used and distributed according to the terms
15  * of the GNU General Public License, incorporated herein by reference.
16  */
17 
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32 #include <linux/siphash.h>
33 
34 #include <linux/netfilter.h>
35 #include <net/netlink.h>
36 #include <net/sock.h>
37 #include <net/netfilter/nf_conntrack.h>
38 #include <net/netfilter/nf_conntrack_core.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_seqadj.h>
42 #include <net/netfilter/nf_conntrack_l4proto.h>
43 #include <net/netfilter/nf_conntrack_tuple.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_labels.h>
48 #include <net/netfilter/nf_conntrack_synproxy.h>
49 #if IS_ENABLED(CONFIG_NF_NAT)
50 #include <net/netfilter/nf_nat.h>
51 #include <net/netfilter/nf_nat_helper.h>
52 #endif
53 
54 #include <linux/netfilter/nfnetlink.h>
55 #include <linux/netfilter/nfnetlink_conntrack.h>
56 
57 MODULE_LICENSE("GPL");
58 
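/* Tuple dumping helpers: the functions below serialize an
 * nf_conntrack_tuple into nested netlink attributes, CTA_TUPLE_IP for
 * the layer 3 addresses and CTA_TUPLE_PROTO for the layer 4 protocol
 * number plus any per-protocol fields.
 */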
59 static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
60 				const struct nf_conntrack_tuple *tuple,
61 				const struct nf_conntrack_l4proto *l4proto)
62 {
63 	int ret = 0;
64 	struct nlattr *nest_parms;
65 
66 	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
67 	if (!nest_parms)
68 		goto nla_put_failure;
69 	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
70 		goto nla_put_failure;
71 
72 	if (likely(l4proto->tuple_to_nlattr))
73 		ret = l4proto->tuple_to_nlattr(skb, tuple);
74 
75 	nla_nest_end(skb, nest_parms);
76 
77 	return ret;
78 
79 nla_put_failure:
80 	return -1;
81 }
82 
83 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
84 				const struct nf_conntrack_tuple *tuple)
85 {
86 	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
87 	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
88 		return -EMSGSIZE;
89 	return 0;
90 }
91 
92 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
93 				const struct nf_conntrack_tuple *tuple)
94 {
95 	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
96 	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
97 		return -EMSGSIZE;
98 	return 0;
99 }
100 
101 static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
102 				    const struct nf_conntrack_tuple *tuple)
103 {
104 	int ret = 0;
105 	struct nlattr *nest_parms;
106 
107 	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
108 	if (!nest_parms)
109 		goto nla_put_failure;
110 
111 	switch (tuple->src.l3num) {
112 	case NFPROTO_IPV4:
113 		ret = ipv4_tuple_to_nlattr(skb, tuple);
114 		break;
115 	case NFPROTO_IPV6:
116 		ret = ipv6_tuple_to_nlattr(skb, tuple);
117 		break;
118 	}
119 
120 	nla_nest_end(skb, nest_parms);
121 
122 	return ret;
123 
124 nla_put_failure:
125 	return -1;
126 }
127 
128 static int ctnetlink_dump_tuples(struct sk_buff *skb,
129 				 const struct nf_conntrack_tuple *tuple)
130 {
131 	const struct nf_conntrack_l4proto *l4proto;
132 	int ret;
133 
134 	rcu_read_lock();
135 	ret = ctnetlink_dump_tuples_ip(skb, tuple);
136 
137 	if (ret >= 0) {
138 		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
139 		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
140 	}
141 	rcu_read_unlock();
142 	return ret;
143 }
144 
145 static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
146 				  const struct nf_conntrack_zone *zone, int dir)
147 {
148 	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
149 		return 0;
150 	if (nla_put_be16(skb, attrtype, htons(zone->id)))
151 		goto nla_put_failure;
152 	return 0;
153 
154 nla_put_failure:
155 	return -1;
156 }
157 
158 static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
159 {
160 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
161 		goto nla_put_failure;
162 	return 0;
163 
164 nla_put_failure:
165 	return -1;
166 }
167 
168 static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
169 {
170 	long timeout = nf_ct_expires(ct) / HZ;
171 
172 	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
173 		goto nla_put_failure;
174 	return 0;
175 
176 nla_put_failure:
177 	return -1;
178 }
179 
180 static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
181 {
182 	const struct nf_conntrack_l4proto *l4proto;
183 	struct nlattr *nest_proto;
184 	int ret;
185 
186 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
187 	if (!l4proto->to_nlattr)
188 		return 0;
189 
190 	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
191 	if (!nest_proto)
192 		goto nla_put_failure;
193 
194 	ret = l4proto->to_nlattr(skb, nest_proto, ct);
195 
196 	nla_nest_end(skb, nest_proto);
197 
198 	return ret;
199 
200 nla_put_failure:
201 	return -1;
202 }
203 
204 static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
205 				   const struct nf_conn *ct)
206 {
207 	struct nlattr *nest_helper;
208 	const struct nf_conn_help *help = nfct_help(ct);
209 	struct nf_conntrack_helper *helper;
210 
211 	if (!help)
212 		return 0;
213 
214 	helper = rcu_dereference(help->helper);
215 	if (!helper)
216 		goto out;
217 
218 	nest_helper = nla_nest_start(skb, CTA_HELP);
219 	if (!nest_helper)
220 		goto nla_put_failure;
221 	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
222 		goto nla_put_failure;
223 
224 	if (helper->to_nlattr)
225 		helper->to_nlattr(skb, ct);
226 
227 	nla_nest_end(skb, nest_helper);
228 out:
229 	return 0;
230 
231 nla_put_failure:
232 	return -1;
233 }
234 
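/* Dump the packet/byte counters of one direction as a
 * CTA_COUNTERS_ORIG or CTA_COUNTERS_REPLY nest.  For a CTRZERO request
 * the counters are read and reset atomically.
 */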
235 static int
236 dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
237 	      enum ip_conntrack_dir dir, int type)
238 {
239 	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
240 	struct nf_conn_counter *counter = acct->counter;
241 	struct nlattr *nest_count;
242 	u64 pkts, bytes;
243 
244 	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
245 		pkts = atomic64_xchg(&counter[dir].packets, 0);
246 		bytes = atomic64_xchg(&counter[dir].bytes, 0);
247 	} else {
248 		pkts = atomic64_read(&counter[dir].packets);
249 		bytes = atomic64_read(&counter[dir].bytes);
250 	}
251 
252 	nest_count = nla_nest_start(skb, attr);
253 	if (!nest_count)
254 		goto nla_put_failure;
255 
256 	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
257 			 CTA_COUNTERS_PAD) ||
258 	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
259 			 CTA_COUNTERS_PAD))
260 		goto nla_put_failure;
261 
262 	nla_nest_end(skb, nest_count);
263 
264 	return 0;
265 
266 nla_put_failure:
267 	return -1;
268 }
269 
270 static int
271 ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
272 {
273 	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
274 
275 	if (!acct)
276 		return 0;
277 
278 	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
279 		return -1;
280 	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
281 		return -1;
282 
283 	return 0;
284 }
285 
286 static int
287 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
288 {
289 	struct nlattr *nest_count;
290 	const struct nf_conn_tstamp *tstamp;
291 
292 	tstamp = nf_conn_tstamp_find(ct);
293 	if (!tstamp)
294 		return 0;
295 
296 	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
297 	if (!nest_count)
298 		goto nla_put_failure;
299 
300 	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
301 			 CTA_TIMESTAMP_PAD) ||
302 	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
303 					       cpu_to_be64(tstamp->stop),
304 					       CTA_TIMESTAMP_PAD)))
305 		goto nla_put_failure;
306 	nla_nest_end(skb, nest_count);
307 
308 	return 0;
309 
310 nla_put_failure:
311 	return -1;
312 }
313 
314 #ifdef CONFIG_NF_CONNTRACK_MARK
315 static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
316 {
317 	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
318 		goto nla_put_failure;
319 	return 0;
320 
321 nla_put_failure:
322 	return -1;
323 }
324 #else
325 #define ctnetlink_dump_mark(a, b) (0)
326 #endif
327 
328 #ifdef CONFIG_NF_CONNTRACK_SECMARK
329 static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
330 {
331 	struct nlattr *nest_secctx;
332 	int len, ret;
333 	char *secctx;
334 
335 	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
336 	if (ret)
337 		return 0;
338 
339 	ret = -1;
340 	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
341 	if (!nest_secctx)
342 		goto nla_put_failure;
343 
344 	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
345 		goto nla_put_failure;
346 	nla_nest_end(skb, nest_secctx);
347 
348 	ret = 0;
349 nla_put_failure:
350 	security_release_secctx(secctx, len);
351 	return ret;
352 }
353 #else
354 #define ctnetlink_dump_secctx(a, b) (0)
355 #endif
356 
357 #ifdef CONFIG_NF_CONNTRACK_LABELS
358 static inline int ctnetlink_label_size(const struct nf_conn *ct)
359 {
360 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
361 
362 	if (!labels)
363 		return 0;
364 	return nla_total_size(sizeof(labels->bits));
365 }
366 
367 static int
368 ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
369 {
370 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
371 	unsigned int i;
372 
373 	if (!labels)
374 		return 0;
375 
376 	i = 0;
377 	do {
378 		if (labels->bits[i] != 0)
379 			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
380 				       labels->bits);
381 		i++;
382 	} while (i < ARRAY_SIZE(labels->bits));
383 
384 	return 0;
385 }
386 #else
387 #define ctnetlink_dump_labels(a, b) (0)
388 #define ctnetlink_label_size(a)	(0)
389 #endif
390 
391 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
392 
393 static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
394 {
395 	struct nlattr *nest_parms;
396 
397 	if (!(ct->status & IPS_EXPECTED))
398 		return 0;
399 
400 	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
401 	if (!nest_parms)
402 		goto nla_put_failure;
403 	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
404 		goto nla_put_failure;
405 	nla_nest_end(skb, nest_parms);
406 
407 	return 0;
408 
409 nla_put_failure:
410 	return -1;
411 }
412 
413 static int
414 dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
415 {
416 	struct nlattr *nest_parms;
417 
418 	nest_parms = nla_nest_start(skb, type);
419 	if (!nest_parms)
420 		goto nla_put_failure;
421 
422 	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
423 			 htonl(seq->correction_pos)) ||
424 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
425 			 htonl(seq->offset_before)) ||
426 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
427 			 htonl(seq->offset_after)))
428 		goto nla_put_failure;
429 
430 	nla_nest_end(skb, nest_parms);
431 
432 	return 0;
433 
434 nla_put_failure:
435 	return -1;
436 }
437 
438 static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
439 {
440 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
441 	struct nf_ct_seqadj *seq;
442 
443 	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
444 		return 0;
445 
446 	spin_lock_bh(&ct->lock);
447 	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
448 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
449 		goto err;
450 
451 	seq = &seqadj->seq[IP_CT_DIR_REPLY];
452 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
453 		goto err;
454 
455 	spin_unlock_bh(&ct->lock);
456 	return 0;
457 err:
458 	spin_unlock_bh(&ct->lock);
459 	return -1;
460 }
461 
462 static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
463 {
464 	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
465 	struct nlattr *nest_parms;
466 
467 	if (!synproxy)
468 		return 0;
469 
470 	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
471 	if (!nest_parms)
472 		goto nla_put_failure;
473 
474 	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
475 	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
476 	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
477 		goto nla_put_failure;
478 
479 	nla_nest_end(skb, nest_parms);
480 
481 	return 0;
482 
483 nla_put_failure:
484 	return -1;
485 }
486 
487 static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
488 {
489 	__be32 id = (__force __be32)nf_ct_get_id(ct);
490 
491 	if (nla_put_be32(skb, CTA_ID, id))
492 		goto nla_put_failure;
493 	return 0;
494 
495 nla_put_failure:
496 	return -1;
497 }
498 
499 static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
500 {
501 	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
502 		goto nla_put_failure;
503 	return 0;
504 
505 nla_put_failure:
506 	return -1;
507 }
508 
509 /* all these functions access ct->ext. Caller must either hold a reference
510  * on ct or prevent its deletion by holding either the bucket spinlock or
511  * pcpu dying list lock.
512  */
513 static int ctnetlink_dump_extinfo(struct sk_buff *skb,
514 				  struct nf_conn *ct, u32 type)
515 {
516 	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
517 	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
518 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
519 	    ctnetlink_dump_labels(skb, ct) < 0 ||
520 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
521 	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
522 		return -1;
523 
524 	return 0;
525 }
526 
527 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
528 {
529 	if (ctnetlink_dump_status(skb, ct) < 0 ||
530 	    ctnetlink_dump_mark(skb, ct) < 0 ||
531 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
532 	    ctnetlink_dump_id(skb, ct) < 0 ||
533 	    ctnetlink_dump_use(skb, ct) < 0 ||
534 	    ctnetlink_dump_master(skb, ct) < 0)
535 		return -1;
536 
537 	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
538 	    (ctnetlink_dump_timeout(skb, ct) < 0 ||
539 	     ctnetlink_dump_protoinfo(skb, ct) < 0))
540 		return -1;
541 
542 	return 0;
543 }
544 
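/* Build one complete conntrack message: both tuples with their zones,
 * the generic state (status, timeout, protoinfo, id, ...) and, if
 * extinfo is set, the extension data such as accounting, timestamps,
 * helper, labels, seqadj and synproxy.
 */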
545 static int
546 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
547 		    struct nf_conn *ct, bool extinfo)
548 {
549 	const struct nf_conntrack_zone *zone;
550 	struct nlmsghdr *nlh;
551 	struct nfgenmsg *nfmsg;
552 	struct nlattr *nest_parms;
553 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
554 
555 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
556 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
557 	if (nlh == NULL)
558 		goto nlmsg_failure;
559 
560 	nfmsg = nlmsg_data(nlh);
561 	nfmsg->nfgen_family = nf_ct_l3num(ct);
562 	nfmsg->version      = NFNETLINK_V0;
563 	nfmsg->res_id	    = 0;
564 
565 	zone = nf_ct_zone(ct);
566 
567 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
568 	if (!nest_parms)
569 		goto nla_put_failure;
570 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
571 		goto nla_put_failure;
572 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
573 				   NF_CT_ZONE_DIR_ORIG) < 0)
574 		goto nla_put_failure;
575 	nla_nest_end(skb, nest_parms);
576 
577 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
578 	if (!nest_parms)
579 		goto nla_put_failure;
580 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
581 		goto nla_put_failure;
582 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
583 				   NF_CT_ZONE_DIR_REPL) < 0)
584 		goto nla_put_failure;
585 	nla_nest_end(skb, nest_parms);
586 
587 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
588 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
589 		goto nla_put_failure;
590 
591 	if (ctnetlink_dump_info(skb, ct) < 0)
592 		goto nla_put_failure;
593 	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
594 		goto nla_put_failure;
595 
596 	nlmsg_end(skb, nlh);
597 	return skb->len;
598 
599 nlmsg_failure:
600 nla_put_failure:
601 	nlmsg_cancel(skb, nlh);
602 	return -1;
603 }
604 
605 static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
606 	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
607 	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
608 	[CTA_IP_V6_SRC]	= { .len = sizeof(__be32) * 4 },
609 	[CTA_IP_V6_DST]	= { .len = sizeof(__be32) * 4 },
610 };
611 
612 #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
613 static size_t ctnetlink_proto_size(const struct nf_conn *ct)
614 {
615 	const struct nf_conntrack_l4proto *l4proto;
616 	size_t len, len4 = 0;
617 
618 	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
619 	len *= 3u; /* ORIG, REPLY, MASTER */
620 
621 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
622 	len += l4proto->nlattr_size;
623 	if (l4proto->nlattr_tuple_size) {
624 		len4 = l4proto->nlattr_tuple_size();
625 		len4 *= 3u; /* ORIG, REPLY, MASTER */
626 	}
627 
628 	return len + len4;
629 }
630 #endif
631 
632 static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
633 {
634 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
635 		return 0;
636 	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
637 	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
638 	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
639 	       ;
640 }
641 
642 static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
643 {
644 #ifdef CONFIG_NF_CONNTRACK_SECMARK
645 	int len, ret;
646 
647 	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
648 	if (ret)
649 		return 0;
650 
651 	return nla_total_size(0) /* CTA_SECCTX */
652 	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
653 #else
654 	return 0;
655 #endif
656 }
657 
658 static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
659 {
660 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
661 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
662 		return 0;
663 	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
664 #else
665 	return 0;
666 #endif
667 }
668 
669 #ifdef CONFIG_NF_CONNTRACK_EVENTS
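/* Upper bound on the size of an event message, used to size the skb
 * allocated in ctnetlink_conntrack_event().
 */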
670 static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
671 {
672 	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
673 	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
674 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
675 	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
676 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
677 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
678 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
679 	       + ctnetlink_acct_size(ct)
680 	       + ctnetlink_timestamp_size(ct)
681 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
682 	       + nla_total_size(0) /* CTA_PROTOINFO */
683 	       + nla_total_size(0) /* CTA_HELP */
684 	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
685 	       + ctnetlink_secctx_size(ct)
686 #if IS_ENABLED(CONFIG_NF_NAT)
687 	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
688 	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
689 #endif
690 #ifdef CONFIG_NF_CONNTRACK_MARK
691 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
692 #endif
693 #ifdef CONFIG_NF_CONNTRACK_ZONES
694 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
695 #endif
696 	       + ctnetlink_proto_size(ct)
697 	       + ctnetlink_label_size(ct)
698 	       ;
699 }
700 
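/* Event callback: translate conntrack state changes (new, update,
 * destroy) into a netlink message and multicast it to the matching
 * NFNLGRP_CONNTRACK_* group.
 */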
701 static int
702 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
703 {
704 	const struct nf_conntrack_zone *zone;
705 	struct net *net;
706 	struct nlmsghdr *nlh;
707 	struct nfgenmsg *nfmsg;
708 	struct nlattr *nest_parms;
709 	struct nf_conn *ct = item->ct;
710 	struct sk_buff *skb;
711 	unsigned int type;
712 	unsigned int flags = 0, group;
713 	int err;
714 
715 	if (events & (1 << IPCT_DESTROY)) {
716 		type = IPCTNL_MSG_CT_DELETE;
717 		group = NFNLGRP_CONNTRACK_DESTROY;
718 	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
719 		type = IPCTNL_MSG_CT_NEW;
720 		flags = NLM_F_CREATE|NLM_F_EXCL;
721 		group = NFNLGRP_CONNTRACK_NEW;
722 	} else if (events) {
723 		type = IPCTNL_MSG_CT_NEW;
724 		group = NFNLGRP_CONNTRACK_UPDATE;
725 	} else
726 		return 0;
727 
728 	net = nf_ct_net(ct);
729 	if (!item->report && !nfnetlink_has_listeners(net, group))
730 		return 0;
731 
732 	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
733 	if (skb == NULL)
734 		goto errout;
735 
736 	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
737 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
738 	if (nlh == NULL)
739 		goto nlmsg_failure;
740 
741 	nfmsg = nlmsg_data(nlh);
742 	nfmsg->nfgen_family = nf_ct_l3num(ct);
743 	nfmsg->version	= NFNETLINK_V0;
744 	nfmsg->res_id	= 0;
745 
746 	zone = nf_ct_zone(ct);
747 
748 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
749 	if (!nest_parms)
750 		goto nla_put_failure;
751 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
752 		goto nla_put_failure;
753 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
754 				   NF_CT_ZONE_DIR_ORIG) < 0)
755 		goto nla_put_failure;
756 	nla_nest_end(skb, nest_parms);
757 
758 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
759 	if (!nest_parms)
760 		goto nla_put_failure;
761 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
762 		goto nla_put_failure;
763 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
764 				   NF_CT_ZONE_DIR_REPL) < 0)
765 		goto nla_put_failure;
766 	nla_nest_end(skb, nest_parms);
767 
768 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
769 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
770 		goto nla_put_failure;
771 
772 	if (ctnetlink_dump_id(skb, ct) < 0)
773 		goto nla_put_failure;
774 
775 	if (ctnetlink_dump_status(skb, ct) < 0)
776 		goto nla_put_failure;
777 
778 	if (events & (1 << IPCT_DESTROY)) {
779 		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
780 		    ctnetlink_dump_timestamp(skb, ct) < 0)
781 			goto nla_put_failure;
782 	} else {
783 		if (ctnetlink_dump_timeout(skb, ct) < 0)
784 			goto nla_put_failure;
785 
786 		if (events & (1 << IPCT_PROTOINFO)
787 		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
788 			goto nla_put_failure;
789 
790 		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
791 		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
792 			goto nla_put_failure;
793 
794 #ifdef CONFIG_NF_CONNTRACK_SECMARK
795 		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
796 		    && ctnetlink_dump_secctx(skb, ct) < 0)
797 			goto nla_put_failure;
798 #endif
799 		if (events & (1 << IPCT_LABEL) &&
800 		     ctnetlink_dump_labels(skb, ct) < 0)
801 			goto nla_put_failure;
802 
803 		if (events & (1 << IPCT_RELATED) &&
804 		    ctnetlink_dump_master(skb, ct) < 0)
805 			goto nla_put_failure;
806 
807 		if (events & (1 << IPCT_SEQADJ) &&
808 		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
809 			goto nla_put_failure;
810 
811 		if (events & (1 << IPCT_SYNPROXY) &&
812 		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
813 			goto nla_put_failure;
814 	}
815 
816 #ifdef CONFIG_NF_CONNTRACK_MARK
817 	if ((events & (1 << IPCT_MARK) || ct->mark)
818 	    && ctnetlink_dump_mark(skb, ct) < 0)
819 		goto nla_put_failure;
820 #endif
821 	nlmsg_end(skb, nlh);
822 	err = nfnetlink_send(skb, net, item->portid, group, item->report,
823 			     GFP_ATOMIC);
824 	if (err == -ENOBUFS || err == -EAGAIN)
825 		return -ENOBUFS;
826 
827 	return 0;
828 
829 nla_put_failure:
830 	nlmsg_cancel(skb, nlh);
831 nlmsg_failure:
832 	kfree_skb(skb);
833 errout:
834 	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
835 		return -ENOBUFS;
836 
837 	return 0;
838 }
839 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
840 
841 static int ctnetlink_done(struct netlink_callback *cb)
842 {
843 	if (cb->args[1])
844 		nf_ct_put((struct nf_conn *)cb->args[1]);
845 	kfree(cb->data);
846 	return 0;
847 }
848 
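/* Dump/flush filter built from the request: an entry matches when its
 * l3num equals the given family (0 matches any) and, with
 * CONFIG_NF_CONNTRACK_MARK, (ct->mark & mark.mask) == mark.val.
 */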
849 struct ctnetlink_filter {
850 	u8 family;
851 	struct {
852 		u_int32_t val;
853 		u_int32_t mask;
854 	} mark;
855 };
856 
857 static struct ctnetlink_filter *
858 ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
859 {
860 	struct ctnetlink_filter *filter;
861 
862 #ifndef CONFIG_NF_CONNTRACK_MARK
863 	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
864 		return ERR_PTR(-EOPNOTSUPP);
865 #endif
866 
867 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
868 	if (filter == NULL)
869 		return ERR_PTR(-ENOMEM);
870 
871 	filter->family = family;
872 
873 #ifdef CONFIG_NF_CONNTRACK_MARK
874 	if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
875 		filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
876 		filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
877 	}
878 #endif
879 	return filter;
880 }
881 
882 static int ctnetlink_start(struct netlink_callback *cb)
883 {
884 	const struct nlattr * const *cda = cb->data;
885 	struct ctnetlink_filter *filter = NULL;
886 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
887 	u8 family = nfmsg->nfgen_family;
888 
889 	if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
890 		filter = ctnetlink_alloc_filter(cda, family);
891 		if (IS_ERR(filter))
892 			return PTR_ERR(filter);
893 	}
894 
895 	cb->data = filter;
896 	return 0;
897 }
898 
899 static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
900 {
901 	struct ctnetlink_filter *filter = data;
902 
903 	if (filter == NULL)
904 		goto out;
905 
906 	/* Match entries of a given L3 protocol number.
907 	 * If it is not specified, i.e. filter->family == 0,
908 	 * then match everything.
909 	 */
910 	if (filter->family && nf_ct_l3num(ct) != filter->family)
911 		goto ignore_entry;
912 
913 #ifdef CONFIG_NF_CONNTRACK_MARK
914 	if ((ct->mark & filter->mark.mask) != filter->mark.val)
915 		goto ignore_entry;
916 #endif
917 
918 out:
919 	return 1;
920 
921 ignore_entry:
922 	return 0;
923 }
924 
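/* Dump callback that walks the conntrack hash table: cb->args[0] is the
 * current bucket, cb->args[1] holds a referenced entry to resume from
 * once the previous skb was full.  Expired entries seen during the walk
 * are collected and garbage collected outside the bucket lock.
 */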
925 static int
926 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
927 {
928 	struct net *net = sock_net(skb->sk);
929 	struct nf_conn *ct, *last;
930 	struct nf_conntrack_tuple_hash *h;
931 	struct hlist_nulls_node *n;
932 	struct nf_conn *nf_ct_evict[8];
933 	int res, i;
934 	spinlock_t *lockp;
935 
936 	last = (struct nf_conn *)cb->args[1];
937 	i = 0;
938 
939 	local_bh_disable();
940 	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
941 restart:
942 		while (i) {
943 			i--;
944 			if (nf_ct_should_gc(nf_ct_evict[i]))
945 				nf_ct_kill(nf_ct_evict[i]);
946 			nf_ct_put(nf_ct_evict[i]);
947 		}
948 
949 		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
950 		nf_conntrack_lock(lockp);
951 		if (cb->args[0] >= nf_conntrack_htable_size) {
952 			spin_unlock(lockp);
953 			goto out;
954 		}
955 		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
956 					   hnnode) {
957 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
958 				continue;
959 			ct = nf_ct_tuplehash_to_ctrack(h);
960 			if (nf_ct_is_expired(ct)) {
961 				if (i < ARRAY_SIZE(nf_ct_evict) &&
962 				    atomic_inc_not_zero(&ct->ct_general.use))
963 					nf_ct_evict[i++] = ct;
964 				continue;
965 			}
966 
967 			if (!net_eq(net, nf_ct_net(ct)))
968 				continue;
969 
970 			if (cb->args[1]) {
971 				if (ct != last)
972 					continue;
973 				cb->args[1] = 0;
974 			}
975 			if (!ctnetlink_filter_match(ct, cb->data))
976 				continue;
977 
978 			res =
979 			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
980 					    cb->nlh->nlmsg_seq,
981 					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
982 					    ct, true);
983 			if (res < 0) {
984 				nf_conntrack_get(&ct->ct_general);
985 				cb->args[1] = (unsigned long)ct;
986 				spin_unlock(lockp);
987 				goto out;
988 			}
989 		}
990 		spin_unlock(lockp);
991 		if (cb->args[1]) {
992 			cb->args[1] = 0;
993 			goto restart;
994 		}
995 	}
996 out:
997 	local_bh_enable();
998 	if (last) {
999 		/* nf ct hash resize happened, now clear the leftover. */
1000 		if ((struct nf_conn *)cb->args[1] == last)
1001 			cb->args[1] = 0;
1002 
1003 		nf_ct_put(last);
1004 	}
1005 
1006 	while (i) {
1007 		i--;
1008 		if (nf_ct_should_gc(nf_ct_evict[i]))
1009 			nf_ct_kill(nf_ct_evict[i]);
1010 		nf_ct_put(nf_ct_evict[i]);
1011 	}
1012 
1013 	return skb->len;
1014 }
1015 
1016 static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
1017 				struct nf_conntrack_tuple *t)
1018 {
1019 	if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
1020 		return -EINVAL;
1021 
1022 	t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
1023 	t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
1024 
1025 	return 0;
1026 }
1027 
1028 static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
1029 				struct nf_conntrack_tuple *t)
1030 {
1031 	if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
1032 		return -EINVAL;
1033 
1034 	t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
1035 	t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
1036 
1037 	return 0;
1038 }
1039 
1040 static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
1041 				    struct nf_conntrack_tuple *tuple)
1042 {
1043 	struct nlattr *tb[CTA_IP_MAX+1];
1044 	int ret = 0;
1045 
1046 	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL);
1047 	if (ret < 0)
1048 		return ret;
1049 
1050 	ret = nla_validate_nested_deprecated(attr, CTA_IP_MAX,
1051 					     cta_ip_nla_policy, NULL);
1052 	if (ret)
1053 		return ret;
1054 
1055 	switch (tuple->src.l3num) {
1056 	case NFPROTO_IPV4:
1057 		ret = ipv4_nlattr_to_tuple(tb, tuple);
1058 		break;
1059 	case NFPROTO_IPV6:
1060 		ret = ipv6_nlattr_to_tuple(tb, tuple);
1061 		break;
1062 	}
1063 
1064 	return ret;
1065 }
1066 
1067 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
1068 	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
1069 };
1070 
1071 static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
1072 				       struct nf_conntrack_tuple *tuple)
1073 {
1074 	const struct nf_conntrack_l4proto *l4proto;
1075 	struct nlattr *tb[CTA_PROTO_MAX+1];
1076 	int ret = 0;
1077 
1078 	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
1079 					  proto_nla_policy, NULL);
1080 	if (ret < 0)
1081 		return ret;
1082 
1083 	if (!tb[CTA_PROTO_NUM])
1084 		return -EINVAL;
1085 	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
1086 
1087 	rcu_read_lock();
1088 	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
1089 
1090 	if (likely(l4proto->nlattr_to_tuple)) {
1091 		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
1092 						     l4proto->nla_policy,
1093 						     NULL);
1094 		if (ret == 0)
1095 			ret = l4proto->nlattr_to_tuple(tb, tuple);
1096 	}
1097 
1098 	rcu_read_unlock();
1099 
1100 	return ret;
1101 }
1102 
1103 static int
1104 ctnetlink_parse_zone(const struct nlattr *attr,
1105 		     struct nf_conntrack_zone *zone)
1106 {
1107 	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
1108 			NF_CT_DEFAULT_ZONE_DIR, 0);
1109 #ifdef CONFIG_NF_CONNTRACK_ZONES
1110 	if (attr)
1111 		zone->id = ntohs(nla_get_be16(attr));
1112 #else
1113 	if (attr)
1114 		return -EOPNOTSUPP;
1115 #endif
1116 	return 0;
1117 }
1118 
1119 static int
1120 ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
1121 			   struct nf_conntrack_zone *zone)
1122 {
1123 	int ret;
1124 
1125 	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
1126 		return -EINVAL;
1127 
1128 	ret = ctnetlink_parse_zone(attr, zone);
1129 	if (ret < 0)
1130 		return ret;
1131 
1132 	if (type == CTA_TUPLE_REPLY)
1133 		zone->dir = NF_CT_ZONE_DIR_REPL;
1134 	else
1135 		zone->dir = NF_CT_ZONE_DIR_ORIG;
1136 
1137 	return 0;
1138 }
1139 
1140 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1141 	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
1142 	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
1143 	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
1144 };
1145 
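/* Parse a CTA_TUPLE_ORIG/REPLY/MASTER attribute into an
 * nf_conntrack_tuple, including the optional per-tuple zone.
 */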
1146 static int
1147 ctnetlink_parse_tuple(const struct nlattr * const cda[],
1148 		      struct nf_conntrack_tuple *tuple, u32 type,
1149 		      u_int8_t l3num, struct nf_conntrack_zone *zone)
1150 {
1151 	struct nlattr *tb[CTA_TUPLE_MAX+1];
1152 	int err;
1153 
1154 	memset(tuple, 0, sizeof(*tuple));
1155 
1156 	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
1157 					  tuple_nla_policy, NULL);
1158 	if (err < 0)
1159 		return err;
1160 
1161 	if (!tb[CTA_TUPLE_IP])
1162 		return -EINVAL;
1163 
1164 	tuple->src.l3num = l3num;
1165 
1166 	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
1167 	if (err < 0)
1168 		return err;
1169 
1170 	if (!tb[CTA_TUPLE_PROTO])
1171 		return -EINVAL;
1172 
1173 	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
1174 	if (err < 0)
1175 		return err;
1176 
1177 	if (tb[CTA_TUPLE_ZONE]) {
1178 		if (!zone)
1179 			return -EINVAL;
1180 
1181 		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
1182 						 type, zone);
1183 		if (err < 0)
1184 			return err;
1185 	}
1186 
1187 	/* orig and expect tuples get DIR_ORIGINAL */
1188 	if (type == CTA_TUPLE_REPLY)
1189 		tuple->dst.dir = IP_CT_DIR_REPLY;
1190 	else
1191 		tuple->dst.dir = IP_CT_DIR_ORIGINAL;
1192 
1193 	return 0;
1194 }
1195 
1196 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
1197 	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
1198 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
1199 };
1200 
1201 static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
1202 				struct nlattr **helpinfo)
1203 {
1204 	int err;
1205 	struct nlattr *tb[CTA_HELP_MAX+1];
1206 
1207 	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
1208 					  help_nla_policy, NULL);
1209 	if (err < 0)
1210 		return err;
1211 
1212 	if (!tb[CTA_HELP_NAME])
1213 		return -EINVAL;
1214 
1215 	*helper_name = nla_data(tb[CTA_HELP_NAME]);
1216 
1217 	if (tb[CTA_HELP_INFO])
1218 		*helpinfo = tb[CTA_HELP_INFO];
1219 
1220 	return 0;
1221 }
1222 
1223 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
1224 	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
1225 	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
1226 	[CTA_STATUS] 		= { .type = NLA_U32 },
1227 	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
1228 	[CTA_HELP]		= { .type = NLA_NESTED },
1229 	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
1230 	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
1231 	[CTA_MARK]		= { .type = NLA_U32 },
1232 	[CTA_ID]		= { .type = NLA_U32 },
1233 	[CTA_NAT_DST]		= { .type = NLA_NESTED },
1234 	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
1235 	[CTA_NAT_SEQ_ADJ_ORIG]  = { .type = NLA_NESTED },
1236 	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
1237 	[CTA_ZONE]		= { .type = NLA_U16 },
1238 	[CTA_MARK_MASK]		= { .type = NLA_U32 },
1239 	[CTA_LABELS]		= { .type = NLA_BINARY,
1240 				    .len = NF_CT_LABELS_MAX_SIZE },
1241 	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
1242 				    .len = NF_CT_LABELS_MAX_SIZE },
1243 };
1244 
1245 static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
1246 {
1247 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
1248 		return 0;
1249 
1250 	return ctnetlink_filter_match(ct, data);
1251 }
1252 
1253 static int ctnetlink_flush_conntrack(struct net *net,
1254 				     const struct nlattr * const cda[],
1255 				     u32 portid, int report, u8 family)
1256 {
1257 	struct ctnetlink_filter *filter = NULL;
1258 
1259 	if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
1260 		filter = ctnetlink_alloc_filter(cda, family);
1261 		if (IS_ERR(filter))
1262 			return PTR_ERR(filter);
1263 	}
1264 
1265 	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
1266 				  portid, report);
1267 	kfree(filter);
1268 
1269 	return 0;
1270 }
1271 
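/* IPCTNL_MSG_CT_DELETE handler: without a tuple the whole table is
 * flushed (honoring an optional mark filter), otherwise the entry is
 * looked up, optionally checked against CTA_ID and deleted.  Offloaded
 * entries cannot be deleted and yield -EBUSY.
 */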
1272 static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
1273 				   struct sk_buff *skb,
1274 				   const struct nlmsghdr *nlh,
1275 				   const struct nlattr * const cda[],
1276 				   struct netlink_ext_ack *extack)
1277 {
1278 	struct nf_conntrack_tuple_hash *h;
1279 	struct nf_conntrack_tuple tuple;
1280 	struct nf_conn *ct;
1281 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1282 	struct nf_conntrack_zone zone;
1283 	int err;
1284 
1285 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1286 	if (err < 0)
1287 		return err;
1288 
1289 	if (cda[CTA_TUPLE_ORIG])
1290 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1291 					    nfmsg->nfgen_family, &zone);
1292 	else if (cda[CTA_TUPLE_REPLY])
1293 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1294 					    nfmsg->nfgen_family, &zone);
1295 	else {
1296 		u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
1297 
1298 		return ctnetlink_flush_conntrack(net, cda,
1299 						 NETLINK_CB(skb).portid,
1300 						 nlmsg_report(nlh), u3);
1301 	}
1302 
1303 	if (err < 0)
1304 		return err;
1305 
1306 	h = nf_conntrack_find_get(net, &zone, &tuple);
1307 	if (!h)
1308 		return -ENOENT;
1309 
1310 	ct = nf_ct_tuplehash_to_ctrack(h);
1311 
1312 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
1313 		nf_ct_put(ct);
1314 		return -EBUSY;
1315 	}
1316 
1317 	if (cda[CTA_ID]) {
1318 		__be32 id = nla_get_be32(cda[CTA_ID]);
1319 
1320 		if (id != (__force __be32)nf_ct_get_id(ct)) {
1321 			nf_ct_put(ct);
1322 			return -ENOENT;
1323 		}
1324 	}
1325 
1326 	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1327 	nf_ct_put(ct);
1328 
1329 	return 0;
1330 }
1331 
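/* IPCTNL_MSG_CT_GET handler: NLM_F_DUMP starts a table dump, otherwise
 * a single entry is looked up by tuple and returned via unicast.
 */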
1332 static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
1333 				   struct sk_buff *skb,
1334 				   const struct nlmsghdr *nlh,
1335 				   const struct nlattr * const cda[],
1336 				   struct netlink_ext_ack *extack)
1337 {
1338 	struct nf_conntrack_tuple_hash *h;
1339 	struct nf_conntrack_tuple tuple;
1340 	struct nf_conn *ct;
1341 	struct sk_buff *skb2 = NULL;
1342 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1343 	u_int8_t u3 = nfmsg->nfgen_family;
1344 	struct nf_conntrack_zone zone;
1345 	int err;
1346 
1347 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1348 		struct netlink_dump_control c = {
1349 			.start = ctnetlink_start,
1350 			.dump = ctnetlink_dump_table,
1351 			.done = ctnetlink_done,
1352 			.data = (void *)cda,
1353 		};
1354 
1355 		return netlink_dump_start(ctnl, skb, nlh, &c);
1356 	}
1357 
1358 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1359 	if (err < 0)
1360 		return err;
1361 
1362 	if (cda[CTA_TUPLE_ORIG])
1363 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1364 					    u3, &zone);
1365 	else if (cda[CTA_TUPLE_REPLY])
1366 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1367 					    u3, &zone);
1368 	else
1369 		return -EINVAL;
1370 
1371 	if (err < 0)
1372 		return err;
1373 
1374 	h = nf_conntrack_find_get(net, &zone, &tuple);
1375 	if (!h)
1376 		return -ENOENT;
1377 
1378 	ct = nf_ct_tuplehash_to_ctrack(h);
1379 
1380 	err = -ENOMEM;
1381 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1382 	if (skb2 == NULL) {
1383 		nf_ct_put(ct);
1384 		return -ENOMEM;
1385 	}
1386 
1387 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1388 				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true);
1389 	nf_ct_put(ct);
1390 	if (err <= 0)
1391 		goto free;
1392 
1393 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1394 	if (err < 0)
1395 		goto out;
1396 
1397 	return 0;
1398 
1399 free:
1400 	kfree_skb(skb2);
1401 out:
1402 	/* this avoids a loop in nfnetlink. */
1403 	return err == -EAGAIN ? -ENOBUFS : err;
1404 }
1405 
1406 static int ctnetlink_done_list(struct netlink_callback *cb)
1407 {
1408 	if (cb->args[1])
1409 		nf_ct_put((struct nf_conn *)cb->args[1]);
1410 	return 0;
1411 }
1412 
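/* Walk the per-cpu dying or unconfirmed lists.  Extension data is only
 * dumped for the dying list, see the comment in the loop body below.
 */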
1413 static int
1414 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1415 {
1416 	struct nf_conn *ct, *last;
1417 	struct nf_conntrack_tuple_hash *h;
1418 	struct hlist_nulls_node *n;
1419 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1420 	u_int8_t l3proto = nfmsg->nfgen_family;
1421 	int res;
1422 	int cpu;
1423 	struct hlist_nulls_head *list;
1424 	struct net *net = sock_net(skb->sk);
1425 
1426 	if (cb->args[2])
1427 		return 0;
1428 
1429 	last = (struct nf_conn *)cb->args[1];
1430 
1431 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1432 		struct ct_pcpu *pcpu;
1433 
1434 		if (!cpu_possible(cpu))
1435 			continue;
1436 
1437 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1438 		spin_lock_bh(&pcpu->lock);
1439 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1440 restart:
1441 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
1442 			ct = nf_ct_tuplehash_to_ctrack(h);
1443 			if (l3proto && nf_ct_l3num(ct) != l3proto)
1444 				continue;
1445 			if (cb->args[1]) {
1446 				if (ct != last)
1447 					continue;
1448 				cb->args[1] = 0;
1449 			}
1450 
1451 			/* We can't dump extension info for the unconfirmed
1452 			 * list because unconfirmed conntracks can have
1453 			 * ct->ext reallocated (and thus freed).
1454 			 *
1455 			 * In the dying list case ct->ext can't be freed
1456 			 * until after we drop pcpu->lock.
1457 			 */
1458 			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1459 						  cb->nlh->nlmsg_seq,
1460 						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1461 						  ct, dying ? true : false);
1462 			if (res < 0) {
1463 				if (!atomic_inc_not_zero(&ct->ct_general.use))
1464 					continue;
1465 				cb->args[0] = cpu;
1466 				cb->args[1] = (unsigned long)ct;
1467 				spin_unlock_bh(&pcpu->lock);
1468 				goto out;
1469 			}
1470 		}
1471 		if (cb->args[1]) {
1472 			cb->args[1] = 0;
1473 			goto restart;
1474 		}
1475 		spin_unlock_bh(&pcpu->lock);
1476 	}
1477 	cb->args[2] = 1;
1478 out:
1479 	if (last)
1480 		nf_ct_put(last);
1481 
1482 	return skb->len;
1483 }
1484 
1485 static int
1486 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1487 {
1488 	return ctnetlink_dump_list(skb, cb, true);
1489 }
1490 
1491 static int ctnetlink_get_ct_dying(struct net *net, struct sock *ctnl,
1492 				  struct sk_buff *skb,
1493 				  const struct nlmsghdr *nlh,
1494 				  const struct nlattr * const cda[],
1495 				  struct netlink_ext_ack *extack)
1496 {
1497 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1498 		struct netlink_dump_control c = {
1499 			.dump = ctnetlink_dump_dying,
1500 			.done = ctnetlink_done_list,
1501 		};
1502 		return netlink_dump_start(ctnl, skb, nlh, &c);
1503 	}
1504 
1505 	return -EOPNOTSUPP;
1506 }
1507 
1508 static int
1509 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1510 {
1511 	return ctnetlink_dump_list(skb, cb, false);
1512 }
1513 
1514 static int ctnetlink_get_ct_unconfirmed(struct net *net, struct sock *ctnl,
1515 					struct sk_buff *skb,
1516 					const struct nlmsghdr *nlh,
1517 					const struct nlattr * const cda[],
1518 					struct netlink_ext_ack *extack)
1519 {
1520 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1521 		struct netlink_dump_control c = {
1522 			.dump = ctnetlink_dump_unconfirmed,
1523 			.done = ctnetlink_done_list,
1524 		};
1525 		return netlink_dump_start(ctnl, skb, nlh, &c);
1526 	}
1527 
1528 	return -EOPNOTSUPP;
1529 }
1530 
1531 #if IS_ENABLED(CONFIG_NF_NAT)
1532 static int
1533 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1534 			  enum nf_nat_manip_type manip,
1535 			  const struct nlattr *attr)
1536 	__must_hold(RCU)
1537 {
1538 	struct nf_nat_hook *nat_hook;
1539 	int err;
1540 
1541 	nat_hook = rcu_dereference(nf_nat_hook);
1542 	if (!nat_hook) {
1543 #ifdef CONFIG_MODULES
1544 		rcu_read_unlock();
1545 		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1546 		if (request_module("nf-nat") < 0) {
1547 			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1548 			rcu_read_lock();
1549 			return -EOPNOTSUPP;
1550 		}
1551 		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1552 		rcu_read_lock();
1553 		nat_hook = rcu_dereference(nf_nat_hook);
1554 		if (nat_hook)
1555 			return -EAGAIN;
1556 #endif
1557 		return -EOPNOTSUPP;
1558 	}
1559 
1560 	err = nat_hook->parse_nat_setup(ct, manip, attr);
1561 	if (err == -EAGAIN) {
1562 #ifdef CONFIG_MODULES
1563 		rcu_read_unlock();
1564 		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1565 		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1566 			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1567 			rcu_read_lock();
1568 			return -EOPNOTSUPP;
1569 		}
1570 		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1571 		rcu_read_lock();
1572 #else
1573 		err = -EOPNOTSUPP;
1574 #endif
1575 	}
1576 	return err;
1577 }
1578 #endif
1579 
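/* Apply status bit changes requested by user space while leaving the
 * kernel-managed bits in IPS_UNCHANGEABLE_MASK untouched.
 */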
1580 static void
1581 __ctnetlink_change_status(struct nf_conn *ct, unsigned long on,
1582 			  unsigned long off)
1583 {
1584 	unsigned int bit;
1585 
1586 	/* Ignore these unchangeable bits */
1587 	on &= ~IPS_UNCHANGEABLE_MASK;
1588 	off &= ~IPS_UNCHANGEABLE_MASK;
1589 
1590 	for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
1591 		if (on & (1 << bit))
1592 			set_bit(bit, &ct->status);
1593 		else if (off & (1 << bit))
1594 			clear_bit(bit, &ct->status);
1595 	}
1596 }
1597 
1598 static int
1599 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1600 {
1601 	unsigned long d;
1602 	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1603 	d = ct->status ^ status;
1604 
1605 	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1606 		/* unchangeable */
1607 		return -EBUSY;
1608 
1609 	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1610 		/* SEEN_REPLY bit can only be set */
1611 		return -EBUSY;
1612 
1613 	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1614 		/* ASSURED bit can only be set */
1615 		return -EBUSY;
1616 
1617 	__ctnetlink_change_status(ct, status, 0);
1618 	return 0;
1619 }
1620 
1621 static int
1622 ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1623 {
1624 #if IS_ENABLED(CONFIG_NF_NAT)
1625 	int ret;
1626 
1627 	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1628 		return 0;
1629 
1630 	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1631 					cda[CTA_NAT_DST]);
1632 	if (ret < 0)
1633 		return ret;
1634 
1635 	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
1636 					 cda[CTA_NAT_SRC]);
1637 #else
1638 	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1639 		return 0;
1640 	return -EOPNOTSUPP;
1641 #endif
1642 }
1643 
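/* Change the helper of a conntrack: an empty name detaches the current
 * helper, re-setting the same helper refreshes its private data, and
 * switching an existing entry to a different helper is rejected.
 */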
1644 static int ctnetlink_change_helper(struct nf_conn *ct,
1645 				   const struct nlattr * const cda[])
1646 {
1647 	struct nf_conntrack_helper *helper;
1648 	struct nf_conn_help *help = nfct_help(ct);
1649 	char *helpname = NULL;
1650 	struct nlattr *helpinfo = NULL;
1651 	int err;
1652 
1653 	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1654 	if (err < 0)
1655 		return err;
1656 
1657 	/* don't change helper of sibling connections */
1658 	if (ct->master) {
1659 		/* If we try to change the helper to the same thing twice,
1660 		 * treat the second attempt as a no-op instead of returning
1661 		 * an error.
1662 		 */
1663 		err = -EBUSY;
1664 		if (help) {
1665 			rcu_read_lock();
1666 			helper = rcu_dereference(help->helper);
1667 			if (helper && !strcmp(helper->name, helpname))
1668 				err = 0;
1669 			rcu_read_unlock();
1670 		}
1671 
1672 		return err;
1673 	}
1674 
1675 	if (!strcmp(helpname, "")) {
1676 		if (help && help->helper) {
1677 			/* we had a helper before ... */
1678 			nf_ct_remove_expectations(ct);
1679 			RCU_INIT_POINTER(help->helper, NULL);
1680 		}
1681 
1682 		return 0;
1683 	}
1684 
1685 	rcu_read_lock();
1686 	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1687 					    nf_ct_protonum(ct));
1688 	if (helper == NULL) {
1689 		rcu_read_unlock();
1690 		return -EOPNOTSUPP;
1691 	}
1692 
1693 	if (help) {
1694 		if (help->helper == helper) {
1695 			/* update private helper data if allowed. */
1696 			if (helper->from_nlattr)
1697 				helper->from_nlattr(helpinfo, ct);
1698 			err = 0;
1699 		} else
1700 			err = -EBUSY;
1701 	} else {
1702 		/* we cannot set a helper for an existing conntrack */
1703 		err = -EOPNOTSUPP;
1704 	}
1705 
1706 	rcu_read_unlock();
1707 	return err;
1708 }
1709 
1710 static int ctnetlink_change_timeout(struct nf_conn *ct,
1711 				    const struct nlattr * const cda[])
1712 {
1713 	u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1714 
1715 	if (timeout > INT_MAX)
1716 		timeout = INT_MAX;
1717 	ct->timeout = nfct_time_stamp + (u32)timeout;
1718 
1719 	if (test_bit(IPS_DYING_BIT, &ct->status))
1720 		return -ETIME;
1721 
1722 	return 0;
1723 }
1724 
1725 #if defined(CONFIG_NF_CONNTRACK_MARK)
1726 static void ctnetlink_change_mark(struct nf_conn *ct,
1727 				    const struct nlattr * const cda[])
1728 {
1729 	u32 mark, newmark, mask = 0;
1730 
1731 	if (cda[CTA_MARK_MASK])
1732 		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1733 
1734 	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1735 	newmark = (ct->mark & mask) ^ mark;
1736 	if (newmark != ct->mark)
1737 		ct->mark = newmark;
1738 }
1739 #endif
1740 
1741 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1742 	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
1743 	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
1744 	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
1745 };
1746 
1747 static int ctnetlink_change_protoinfo(struct nf_conn *ct,
1748 				      const struct nlattr * const cda[])
1749 {
1750 	const struct nlattr *attr = cda[CTA_PROTOINFO];
1751 	const struct nf_conntrack_l4proto *l4proto;
1752 	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1753 	int err = 0;
1754 
1755 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
1756 					  protoinfo_policy, NULL);
1757 	if (err < 0)
1758 		return err;
1759 
1760 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
1761 	if (l4proto->from_nlattr)
1762 		err = l4proto->from_nlattr(tb, ct);
1763 
1764 	return err;
1765 }
1766 
1767 static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
1768 	[CTA_SEQADJ_CORRECTION_POS]	= { .type = NLA_U32 },
1769 	[CTA_SEQADJ_OFFSET_BEFORE]	= { .type = NLA_U32 },
1770 	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
1771 };
1772 
1773 static int change_seq_adj(struct nf_ct_seqadj *seq,
1774 			  const struct nlattr * const attr)
1775 {
1776 	int err;
1777 	struct nlattr *cda[CTA_SEQADJ_MAX+1];
1778 
1779 	err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
1780 					  seqadj_policy, NULL);
1781 	if (err < 0)
1782 		return err;
1783 
1784 	if (!cda[CTA_SEQADJ_CORRECTION_POS])
1785 		return -EINVAL;
1786 
1787 	seq->correction_pos =
1788 		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
1789 
1790 	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
1791 		return -EINVAL;
1792 
1793 	seq->offset_before =
1794 		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
1795 
1796 	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
1797 		return -EINVAL;
1798 
1799 	seq->offset_after =
1800 		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
1801 
1802 	return 0;
1803 }
1804 
1805 static int
1806 ctnetlink_change_seq_adj(struct nf_conn *ct,
1807 			 const struct nlattr * const cda[])
1808 {
1809 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
1810 	int ret = 0;
1811 
1812 	if (!seqadj)
1813 		return 0;
1814 
1815 	spin_lock_bh(&ct->lock);
1816 	if (cda[CTA_SEQ_ADJ_ORIG]) {
1817 		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
1818 				     cda[CTA_SEQ_ADJ_ORIG]);
1819 		if (ret < 0)
1820 			goto err;
1821 
1822 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
1823 	}
1824 
1825 	if (cda[CTA_SEQ_ADJ_REPLY]) {
1826 		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
1827 				     cda[CTA_SEQ_ADJ_REPLY]);
1828 		if (ret < 0)
1829 			goto err;
1830 
1831 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
1832 	}
1833 
1834 	spin_unlock_bh(&ct->lock);
1835 	return 0;
1836 err:
1837 	spin_unlock_bh(&ct->lock);
1838 	return ret;
1839 }
1840 
1841 static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
1842 	[CTA_SYNPROXY_ISN]	= { .type = NLA_U32 },
1843 	[CTA_SYNPROXY_ITS]	= { .type = NLA_U32 },
1844 	[CTA_SYNPROXY_TSOFF]	= { .type = NLA_U32 },
1845 };
1846 
1847 static int ctnetlink_change_synproxy(struct nf_conn *ct,
1848 				     const struct nlattr * const cda[])
1849 {
1850 	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
1851 	struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
1852 	int err;
1853 
1854 	if (!synproxy)
1855 		return 0;
1856 
1857 	err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
1858 					  cda[CTA_SYNPROXY], synproxy_policy,
1859 					  NULL);
1860 	if (err < 0)
1861 		return err;
1862 
1863 	if (!tb[CTA_SYNPROXY_ISN] ||
1864 	    !tb[CTA_SYNPROXY_ITS] ||
1865 	    !tb[CTA_SYNPROXY_TSOFF])
1866 		return -EINVAL;
1867 
1868 	synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
1869 	synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
1870 	synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
1871 
1872 	return 0;
1873 }
1874 
1875 static int
1876 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1877 {
1878 #ifdef CONFIG_NF_CONNTRACK_LABELS
1879 	size_t len = nla_len(cda[CTA_LABELS]);
1880 	const void *mask = cda[CTA_LABELS_MASK];
1881 
1882 	if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1883 		return -EINVAL;
1884 
1885 	if (mask) {
1886 		if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1887 		    nla_len(cda[CTA_LABELS_MASK]) != len)
1888 			return -EINVAL;
1889 		mask = nla_data(cda[CTA_LABELS_MASK]);
1890 	}
1891 
1892 	len /= sizeof(u32);
1893 
1894 	return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1895 #else
1896 	return -EOPNOTSUPP;
1897 #endif
1898 }
1899 
1900 static int
1901 ctnetlink_change_conntrack(struct nf_conn *ct,
1902 			   const struct nlattr * const cda[])
1903 {
1904 	int err;
1905 
1906 	/* only allow NAT changes and master assignment for new conntracks */
1907 	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1908 		return -EOPNOTSUPP;
1909 
1910 	if (cda[CTA_HELP]) {
1911 		err = ctnetlink_change_helper(ct, cda);
1912 		if (err < 0)
1913 			return err;
1914 	}
1915 
1916 	if (cda[CTA_TIMEOUT]) {
1917 		err = ctnetlink_change_timeout(ct, cda);
1918 		if (err < 0)
1919 			return err;
1920 	}
1921 
1922 	if (cda[CTA_STATUS]) {
1923 		err = ctnetlink_change_status(ct, cda);
1924 		if (err < 0)
1925 			return err;
1926 	}
1927 
1928 	if (cda[CTA_PROTOINFO]) {
1929 		err = ctnetlink_change_protoinfo(ct, cda);
1930 		if (err < 0)
1931 			return err;
1932 	}
1933 
1934 #if defined(CONFIG_NF_CONNTRACK_MARK)
1935 	if (cda[CTA_MARK])
1936 		ctnetlink_change_mark(ct, cda);
1937 #endif
1938 
1939 	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
1940 		err = ctnetlink_change_seq_adj(ct, cda);
1941 		if (err < 0)
1942 			return err;
1943 	}
1944 
1945 	if (cda[CTA_SYNPROXY]) {
1946 		err = ctnetlink_change_synproxy(ct, cda);
1947 		if (err < 0)
1948 			return err;
1949 	}
1950 
1951 	if (cda[CTA_LABELS]) {
1952 		err = ctnetlink_attach_labels(ct, cda);
1953 		if (err < 0)
1954 			return err;
1955 	}
1956 
1957 	return 0;
1958 }
1959 
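/* Allocate and set up a new conntrack entry from netlink attributes:
 * CTA_TIMEOUT is mandatory, the remaining attributes optionally
 * configure helper, NAT, status, protoinfo, seqadj, synproxy, mark and
 * an expectation master before the entry is inserted into the hash
 * table.
 */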
1960 static struct nf_conn *
1961 ctnetlink_create_conntrack(struct net *net,
1962 			   const struct nf_conntrack_zone *zone,
1963 			   const struct nlattr * const cda[],
1964 			   struct nf_conntrack_tuple *otuple,
1965 			   struct nf_conntrack_tuple *rtuple,
1966 			   u8 u3)
1967 {
1968 	struct nf_conn *ct;
1969 	int err = -EINVAL;
1970 	struct nf_conntrack_helper *helper;
1971 	struct nf_conn_tstamp *tstamp;
1972 	u64 timeout;
1973 
1974 	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1975 	if (IS_ERR(ct))
1976 		return ERR_PTR(-ENOMEM);
1977 
1978 	if (!cda[CTA_TIMEOUT])
1979 		goto err1;
1980 
1981 	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1982 	if (timeout > INT_MAX)
1983 		timeout = INT_MAX;
1984 	ct->timeout = (u32)timeout + nfct_time_stamp;
1985 
1986 	rcu_read_lock();
1987 	if (cda[CTA_HELP]) {
1988 		char *helpname = NULL;
1989 		struct nlattr *helpinfo = NULL;
1990 
1991 		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1992 		if (err < 0)
1993 			goto err2;
1994 
1995 		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1996 						    nf_ct_protonum(ct));
1997 		if (helper == NULL) {
1998 			rcu_read_unlock();
1999 #ifdef CONFIG_MODULES
2000 			if (request_module("nfct-helper-%s", helpname) < 0) {
2001 				err = -EOPNOTSUPP;
2002 				goto err1;
2003 			}
2004 
2005 			rcu_read_lock();
2006 			helper = __nf_conntrack_helper_find(helpname,
2007 							    nf_ct_l3num(ct),
2008 							    nf_ct_protonum(ct));
2009 			if (helper) {
2010 				err = -EAGAIN;
2011 				goto err2;
2012 			}
2013 			rcu_read_unlock();
2014 #endif
2015 			err = -EOPNOTSUPP;
2016 			goto err1;
2017 		} else {
2018 			struct nf_conn_help *help;
2019 
2020 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
2021 			if (help == NULL) {
2022 				err = -ENOMEM;
2023 				goto err2;
2024 			}
2025 			/* set private helper data if allowed. */
2026 			if (helper->from_nlattr)
2027 				helper->from_nlattr(helpinfo, ct);
2028 
2029 			/* not in hash table yet so not strictly necessary */
2030 			RCU_INIT_POINTER(help->helper, helper);
2031 		}
2032 	} else {
2033 		/* try an implicit helper assignment */
2034 		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
2035 		if (err < 0)
2036 			goto err2;
2037 	}
2038 
2039 	err = ctnetlink_setup_nat(ct, cda);
2040 	if (err < 0)
2041 		goto err2;
2042 
2043 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
2044 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
2045 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
2046 	nf_ct_labels_ext_add(ct);
2047 	nfct_seqadj_ext_add(ct);
2048 	nfct_synproxy_ext_add(ct);
2049 
2050 	/* we must add conntrack extensions before confirmation. */
2051 	ct->status |= IPS_CONFIRMED;
2052 
2053 	if (cda[CTA_STATUS]) {
2054 		err = ctnetlink_change_status(ct, cda);
2055 		if (err < 0)
2056 			goto err2;
2057 	}
2058 
2059 	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2060 		err = ctnetlink_change_seq_adj(ct, cda);
2061 		if (err < 0)
2062 			goto err2;
2063 	}
2064 
2065 	memset(&ct->proto, 0, sizeof(ct->proto));
2066 	if (cda[CTA_PROTOINFO]) {
2067 		err = ctnetlink_change_protoinfo(ct, cda);
2068 		if (err < 0)
2069 			goto err2;
2070 	}
2071 
2072 	if (cda[CTA_SYNPROXY]) {
2073 		err = ctnetlink_change_synproxy(ct, cda);
2074 		if (err < 0)
2075 			goto err2;
2076 	}
2077 
2078 #if defined(CONFIG_NF_CONNTRACK_MARK)
2079 	if (cda[CTA_MARK])
2080 		ctnetlink_change_mark(ct, cda);
2081 #endif
2082 
2083 	/* setup master conntrack: this is a confirmed expectation */
2084 	if (cda[CTA_TUPLE_MASTER]) {
2085 		struct nf_conntrack_tuple master;
2086 		struct nf_conntrack_tuple_hash *master_h;
2087 		struct nf_conn *master_ct;
2088 
2089 		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
2090 					    u3, NULL);
2091 		if (err < 0)
2092 			goto err2;
2093 
2094 		master_h = nf_conntrack_find_get(net, zone, &master);
2095 		if (master_h == NULL) {
2096 			err = -ENOENT;
2097 			goto err2;
2098 		}
2099 		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
2100 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
2101 		ct->master = master_ct;
2102 	}
2103 	tstamp = nf_conn_tstamp_find(ct);
2104 	if (tstamp)
2105 		tstamp->start = ktime_get_real_ns();
2106 
2107 	err = nf_conntrack_hash_check_insert(ct);
2108 	if (err < 0)
2109 		goto err2;
2110 
2111 	rcu_read_unlock();
2112 
2113 	return ct;
2114 
2115 err2:
2116 	rcu_read_unlock();
2117 err1:
2118 	nf_conntrack_free(ct);
2119 	return ERR_PTR(err);
2120 }
2121 
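/* IPCTNL_MSG_CT_NEW handler: look up the conntrack by its original or
 * reply tuple.  If no entry exists and NLM_F_CREATE is set, a new one is
 * created; if one exists it is updated, unless NLM_F_EXCL was given, in
 * which case -EEXIST is returned.  Events are reported to listeners
 * afterwards.
 */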
2122 static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
2123 				   struct sk_buff *skb,
2124 				   const struct nlmsghdr *nlh,
2125 				   const struct nlattr * const cda[],
2126 				   struct netlink_ext_ack *extack)
2127 {
2128 	struct nf_conntrack_tuple otuple, rtuple;
2129 	struct nf_conntrack_tuple_hash *h = NULL;
2130 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2131 	struct nf_conn *ct;
2132 	u_int8_t u3 = nfmsg->nfgen_family;
2133 	struct nf_conntrack_zone zone;
2134 	int err;
2135 
2136 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
2137 	if (err < 0)
2138 		return err;
2139 
2140 	if (cda[CTA_TUPLE_ORIG]) {
2141 		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
2142 					    u3, &zone);
2143 		if (err < 0)
2144 			return err;
2145 	}
2146 
2147 	if (cda[CTA_TUPLE_REPLY]) {
2148 		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
2149 					    u3, &zone);
2150 		if (err < 0)
2151 			return err;
2152 	}
2153 
2154 	if (cda[CTA_TUPLE_ORIG])
2155 		h = nf_conntrack_find_get(net, &zone, &otuple);
2156 	else if (cda[CTA_TUPLE_REPLY])
2157 		h = nf_conntrack_find_get(net, &zone, &rtuple);
2158 
2159 	if (h == NULL) {
2160 		err = -ENOENT;
2161 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
2162 			enum ip_conntrack_events events;
2163 
2164 			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
2165 				return -EINVAL;
2166 			if (otuple.dst.protonum != rtuple.dst.protonum)
2167 				return -EINVAL;
2168 
2169 			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
2170 							&rtuple, u3);
2171 			if (IS_ERR(ct))
2172 				return PTR_ERR(ct);
2173 
2174 			err = 0;
2175 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
2176 				events = 1 << IPCT_RELATED;
2177 			else
2178 				events = 1 << IPCT_NEW;
2179 
2180 			if (cda[CTA_LABELS] &&
2181 			    ctnetlink_attach_labels(ct, cda) == 0)
2182 				events |= (1 << IPCT_LABEL);
2183 
2184 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2185 						      (1 << IPCT_ASSURED) |
2186 						      (1 << IPCT_HELPER) |
2187 						      (1 << IPCT_PROTOINFO) |
2188 						      (1 << IPCT_SEQADJ) |
2189 						      (1 << IPCT_MARK) |
2190 						      (1 << IPCT_SYNPROXY) |
2191 						      events,
2192 						      ct, NETLINK_CB(skb).portid,
2193 						      nlmsg_report(nlh));
2194 			nf_ct_put(ct);
2195 		}
2196 
2197 		return err;
2198 	}
2199 	/* implicit 'else' */
2200 
2201 	err = -EEXIST;
2202 	ct = nf_ct_tuplehash_to_ctrack(h);
2203 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
2204 		err = ctnetlink_change_conntrack(ct, cda);
2205 		if (err == 0) {
2206 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2207 						      (1 << IPCT_ASSURED) |
2208 						      (1 << IPCT_HELPER) |
2209 						      (1 << IPCT_LABEL) |
2210 						      (1 << IPCT_PROTOINFO) |
2211 						      (1 << IPCT_SEQADJ) |
2212 						      (1 << IPCT_MARK) |
2213 						      (1 << IPCT_SYNPROXY),
2214 						      ct, NETLINK_CB(skb).portid,
2215 						      nlmsg_report(nlh));
2216 		}
2217 	}
2218 
2219 	nf_ct_put(ct);
2220 	return err;
2221 }
2222 
2223 static int
2224 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2225 				__u16 cpu, const struct ip_conntrack_stat *st)
2226 {
2227 	struct nlmsghdr *nlh;
2228 	struct nfgenmsg *nfmsg;
2229 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2230 
2231 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
2232 			      IPCTNL_MSG_CT_GET_STATS_CPU);
2233 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2234 	if (nlh == NULL)
2235 		goto nlmsg_failure;
2236 
2237 	nfmsg = nlmsg_data(nlh);
2238 	nfmsg->nfgen_family = AF_UNSPEC;
2239 	nfmsg->version      = NFNETLINK_V0;
2240 	nfmsg->res_id	    = htons(cpu);
2241 
2242 	if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
2243 	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
2244 	    nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
2245 	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
2246 	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
2247 				htonl(st->insert_failed)) ||
2248 	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
2249 	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
2250 	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
2251 	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
2252 				htonl(st->search_restart)))
2253 		goto nla_put_failure;
2254 
2255 	nlmsg_end(skb, nlh);
2256 	return skb->len;
2257 
2258 nla_put_failure:
2259 nlmsg_failure:
2260 	nlmsg_cancel(skb, nlh);
2261 	return -1;
2262 }
2263 
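/* Dump per-CPU conntrack statistics, one message per possible CPU.
 * cb->args[0] stores the next CPU so an interrupted dump can resume.
 */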
2264 static int
2265 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2266 {
2267 	int cpu;
2268 	struct net *net = sock_net(skb->sk);
2269 
2270 	if (cb->args[0] == nr_cpu_ids)
2271 		return 0;
2272 
2273 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2274 		const struct ip_conntrack_stat *st;
2275 
2276 		if (!cpu_possible(cpu))
2277 			continue;
2278 
2279 		st = per_cpu_ptr(net->ct.stat, cpu);
2280 		if (ctnetlink_ct_stat_cpu_fill_info(skb,
2281 						    NETLINK_CB(cb->skb).portid,
2282 						    cb->nlh->nlmsg_seq,
2283 						    cpu, st) < 0)
2284 			break;
2285 	}
2286 	cb->args[0] = cpu;
2287 
2288 	return skb->len;
2289 }
2290 
2291 static int ctnetlink_stat_ct_cpu(struct net *net, struct sock *ctnl,
2292 				 struct sk_buff *skb,
2293 				 const struct nlmsghdr *nlh,
2294 				 const struct nlattr * const cda[],
2295 				 struct netlink_ext_ack *extack)
2296 {
2297 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
2298 		struct netlink_dump_control c = {
2299 			.dump = ctnetlink_ct_stat_cpu_dump,
2300 		};
2301 		return netlink_dump_start(ctnl, skb, nlh, &c);
2302 	}
2303 
2304 	return 0;
2305 }
2306 
2307 static int
2308 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2309 			    struct net *net)
2310 {
2311 	struct nlmsghdr *nlh;
2312 	struct nfgenmsg *nfmsg;
2313 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2314 	unsigned int nr_conntracks = atomic_read(&net->ct.count);
2315 
2316 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
2317 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2318 	if (nlh == NULL)
2319 		goto nlmsg_failure;
2320 
2321 	nfmsg = nlmsg_data(nlh);
2322 	nfmsg->nfgen_family = AF_UNSPEC;
2323 	nfmsg->version      = NFNETLINK_V0;
2324 	nfmsg->res_id	    = 0;
2325 
2326 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2327 		goto nla_put_failure;
2328 
2329 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2330 		goto nla_put_failure;
2331 
2332 	nlmsg_end(skb, nlh);
2333 	return skb->len;
2334 
2335 nla_put_failure:
2336 nlmsg_failure:
2337 	nlmsg_cancel(skb, nlh);
2338 	return -1;
2339 }
2340 
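/* IPCTNL_MSG_CT_GET_STATS handler: report the global conntrack count
 * and the configured maximum to the requesting socket.
 */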
2341 static int ctnetlink_stat_ct(struct net *net, struct sock *ctnl,
2342 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
2343 			     const struct nlattr * const cda[],
2344 			     struct netlink_ext_ack *extack)
2345 {
2346 	struct sk_buff *skb2;
2347 	int err;
2348 
2349 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2350 	if (skb2 == NULL)
2351 		return -ENOMEM;
2352 
2353 	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
2354 					  nlh->nlmsg_seq,
2355 					  NFNL_MSG_TYPE(nlh->nlmsg_type),
2356 					  sock_net(skb->sk));
2357 	if (err <= 0)
2358 		goto free;
2359 
2360 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2361 	if (err < 0)
2362 		goto out;
2363 
2364 	return 0;
2365 
2366 free:
2367 	kfree_skb(skb2);
2368 out:
2369 	/* this avoids a loop in nfnetlink. */
2370 	return err == -EAGAIN ? -ENOBUFS : err;
2371 }
2372 
2373 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2374 	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
2375 	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
2376 	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
2377 	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
2378 	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
2379 	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING,
2380 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
2381 	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
2382 	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
2383 	[CTA_EXPECT_CLASS]	= { .type = NLA_U32 },
2384 	[CTA_EXPECT_NAT]	= { .type = NLA_NESTED },
2385 	[CTA_EXPECT_FN]		= { .type = NLA_NUL_STRING },
2386 };
2387 
2388 static struct nf_conntrack_expect *
2389 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2390 		       struct nf_conntrack_helper *helper,
2391 		       struct nf_conntrack_tuple *tuple,
2392 		       struct nf_conntrack_tuple *mask);
2393 
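/* The glue code below is used via the nfnl_ct_hook (see ctnetlink_init())
 * to build and parse conntrack information carried inside other
 * nfnetlink messages, e.g. on the nf_queue path.
 */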
2394 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
2395 static size_t
2396 ctnetlink_glue_build_size(const struct nf_conn *ct)
2397 {
2398 	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2399 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2400 	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2401 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2402 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2403 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2404 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2405 	       + nla_total_size(0) /* CTA_PROTOINFO */
2406 	       + nla_total_size(0) /* CTA_HELP */
2407 	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2408 	       + ctnetlink_secctx_size(ct)
2409 #if IS_ENABLED(CONFIG_NF_NAT)
2410 	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2411 	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2412 #endif
2413 #ifdef CONFIG_NF_CONNTRACK_MARK
2414 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2415 #endif
2416 #ifdef CONFIG_NF_CONNTRACK_ZONES
2417 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2418 #endif
2419 	       + ctnetlink_proto_size(ct)
2420 	       ;
2421 }
2422 
2423 static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
2424 					     enum ip_conntrack_info *ctinfo)
2425 {
2426 	return nf_ct_get(skb, ctinfo);
2427 }
2428 
2429 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
2430 {
2431 	const struct nf_conntrack_zone *zone;
2432 	struct nlattr *nest_parms;
2433 
2434 	zone = nf_ct_zone(ct);
2435 
2436 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
2437 	if (!nest_parms)
2438 		goto nla_put_failure;
2439 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2440 		goto nla_put_failure;
2441 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2442 				   NF_CT_ZONE_DIR_ORIG) < 0)
2443 		goto nla_put_failure;
2444 	nla_nest_end(skb, nest_parms);
2445 
2446 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
2447 	if (!nest_parms)
2448 		goto nla_put_failure;
2449 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2450 		goto nla_put_failure;
2451 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2452 				   NF_CT_ZONE_DIR_REPL) < 0)
2453 		goto nla_put_failure;
2454 	nla_nest_end(skb, nest_parms);
2455 
2456 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
2457 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
2458 		goto nla_put_failure;
2459 
2460 	if (ctnetlink_dump_id(skb, ct) < 0)
2461 		goto nla_put_failure;
2462 
2463 	if (ctnetlink_dump_status(skb, ct) < 0)
2464 		goto nla_put_failure;
2465 
2466 	if (ctnetlink_dump_timeout(skb, ct) < 0)
2467 		goto nla_put_failure;
2468 
2469 	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2470 		goto nla_put_failure;
2471 
2472 	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2473 		goto nla_put_failure;
2474 
2475 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2476 	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2477 		goto nla_put_failure;
2478 #endif
2479 	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2480 		goto nla_put_failure;
2481 
2482 	if ((ct->status & IPS_SEQ_ADJUST) &&
2483 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2484 		goto nla_put_failure;
2485 
2486 	if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
2487 		goto nla_put_failure;
2488 
2489 #ifdef CONFIG_NF_CONNTRACK_MARK
2490 	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2491 		goto nla_put_failure;
2492 #endif
2493 	if (ctnetlink_dump_labels(skb, ct) < 0)
2494 		goto nla_put_failure;
2495 	return 0;
2496 
2497 nla_put_failure:
2498 	return -ENOSPC;
2499 }
2500 
2501 static int
2502 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
2503 		     enum ip_conntrack_info ctinfo,
2504 		     u_int16_t ct_attr, u_int16_t ct_info_attr)
2505 {
2506 	struct nlattr *nest_parms;
2507 
2508 	nest_parms = nla_nest_start(skb, ct_attr);
2509 	if (!nest_parms)
2510 		goto nla_put_failure;
2511 
2512 	if (__ctnetlink_glue_build(skb, ct) < 0)
2513 		goto nla_put_failure;
2514 
2515 	nla_nest_end(skb, nest_parms);
2516 
2517 	if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
2518 		goto nla_put_failure;
2519 
2520 	return 0;
2521 
2522 nla_put_failure:
2523 	return -ENOSPC;
2524 }
2525 
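/* Relaxed status update used by the glue path: unchangeable bits are
 * silently ignored rather than rejected; only clearing SEEN_REPLY or
 * ASSURED is refused with -EBUSY.
 */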
2526 static int
2527 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
2528 {
2529 	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
2530 	unsigned long d = ct->status ^ status;
2531 
2532 	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
2533 		/* SEEN_REPLY bit can only be set */
2534 		return -EBUSY;
2535 
2536 	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
2537 		/* ASSURED bit can only be set */
2538 		return -EBUSY;
2539 
2540 	/* This check is less strict than ctnetlink_change_status()
2541 	 * because callers often flip IPS_EXPECTED bits when sending
2542 	 * an NFQA_CT attribute to the kernel.  So ignore the
2543 	 * unchangeable bits but do not error out. Also user programs
2544 	 * are allowed to clear the bits that they are allowed to change.
2545 	 */
2546 	__ctnetlink_change_status(ct, status, ~status);
2547 	return 0;
2548 }
2549 
2550 static int
2551 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2552 {
2553 	int err;
2554 
2555 	if (cda[CTA_TIMEOUT]) {
2556 		err = ctnetlink_change_timeout(ct, cda);
2557 		if (err < 0)
2558 			return err;
2559 	}
2560 	if (cda[CTA_STATUS]) {
2561 		err = ctnetlink_update_status(ct, cda);
2562 		if (err < 0)
2563 			return err;
2564 	}
2565 	if (cda[CTA_HELP]) {
2566 		err = ctnetlink_change_helper(ct, cda);
2567 		if (err < 0)
2568 			return err;
2569 	}
2570 	if (cda[CTA_LABELS]) {
2571 		err = ctnetlink_attach_labels(ct, cda);
2572 		if (err < 0)
2573 			return err;
2574 	}
2575 #if defined(CONFIG_NF_CONNTRACK_MARK)
2576 	if (cda[CTA_MARK]) {
2577 		ctnetlink_change_mark(ct, cda);
2578 	}
2579 #endif
2580 	return 0;
2581 }
2582 
2583 static int
2584 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
2585 {
2586 	struct nlattr *cda[CTA_MAX+1];
2587 	int ret;
2588 
2589 	ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy,
2590 					  NULL);
2591 	if (ret < 0)
2592 		return ret;
2593 
2594 	return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
2595 }
2596 
2597 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
2598 				    const struct nf_conn *ct,
2599 				    struct nf_conntrack_tuple *tuple,
2600 				    struct nf_conntrack_tuple *mask)
2601 {
2602 	int err;
2603 
2604 	err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2605 				    nf_ct_l3num(ct), NULL);
2606 	if (err < 0)
2607 		return err;
2608 
2609 	return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2610 				     nf_ct_l3num(ct), NULL);
2611 }
2612 
2613 static int
2614 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2615 			     u32 portid, u32 report)
2616 {
2617 	struct nlattr *cda[CTA_EXPECT_MAX+1];
2618 	struct nf_conntrack_tuple tuple, mask;
2619 	struct nf_conntrack_helper *helper = NULL;
2620 	struct nf_conntrack_expect *exp;
2621 	int err;
2622 
2623 	err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr,
2624 					  exp_nla_policy, NULL);
2625 	if (err < 0)
2626 		return err;
2627 
2628 	err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
2629 				       ct, &tuple, &mask);
2630 	if (err < 0)
2631 		return err;
2632 
2633 	if (cda[CTA_EXPECT_HELP_NAME]) {
2634 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2635 
2636 		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2637 						    nf_ct_protonum(ct));
2638 		if (helper == NULL)
2639 			return -EOPNOTSUPP;
2640 	}
2641 
2642 	exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2643 				     helper, &tuple, &mask);
2644 	if (IS_ERR(exp))
2645 		return PTR_ERR(exp);
2646 
2647 	err = nf_ct_expect_related_report(exp, portid, report, 0);
2648 	nf_ct_expect_put(exp);
2649 	return err;
2650 }
2651 
2652 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
2653 				  enum ip_conntrack_info ctinfo, int diff)
2654 {
2655 	if (!(ct->status & IPS_NAT_MASK))
2656 		return;
2657 
2658 	nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
2659 }
2660 
2661 static struct nfnl_ct_hook ctnetlink_glue_hook = {
2662 	.get_ct		= ctnetlink_glue_get_ct,
2663 	.build_size	= ctnetlink_glue_build_size,
2664 	.build		= ctnetlink_glue_build,
2665 	.parse		= ctnetlink_glue_parse,
2666 	.attach_expect	= ctnetlink_glue_attach_expect,
2667 	.seq_adjust	= ctnetlink_glue_seqadj,
2668 };
2669 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
2670 
2671 /***********************************************************************
2672  * EXPECT
2673  ***********************************************************************/
2674 
2675 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2676 				    const struct nf_conntrack_tuple *tuple,
2677 				    u32 type)
2678 {
2679 	struct nlattr *nest_parms;
2680 
2681 	nest_parms = nla_nest_start(skb, type);
2682 	if (!nest_parms)
2683 		goto nla_put_failure;
2684 	if (ctnetlink_dump_tuples(skb, tuple) < 0)
2685 		goto nla_put_failure;
2686 	nla_nest_end(skb, nest_parms);
2687 
2688 	return 0;
2689 
2690 nla_put_failure:
2691 	return -1;
2692 }
2693 
2694 static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2695 				   const struct nf_conntrack_tuple *tuple,
2696 				   const struct nf_conntrack_tuple_mask *mask)
2697 {
2698 	const struct nf_conntrack_l4proto *l4proto;
2699 	struct nf_conntrack_tuple m;
2700 	struct nlattr *nest_parms;
2701 	int ret;
2702 
2703 	memset(&m, 0xFF, sizeof(m));
2704 	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2705 	m.src.u.all = mask->src.u.all;
2706 	m.dst.protonum = tuple->dst.protonum;
2707 
2708 	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
2709 	if (!nest_parms)
2710 		goto nla_put_failure;
2711 
2712 	rcu_read_lock();
2713 	ret = ctnetlink_dump_tuples_ip(skb, &m);
2714 	if (ret >= 0) {
2715 		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
2716 		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2717 	}
2718 	rcu_read_unlock();
2719 
2720 	if (unlikely(ret < 0))
2721 		goto nla_put_failure;
2722 
2723 	nla_nest_end(skb, nest_parms);
2724 
2725 	return 0;
2726 
2727 nla_put_failure:
2728 	return -1;
2729 }
2730 
2731 static const union nf_inet_addr any_addr;
2732 
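/* Derive an opaque expectation ID from a keyed siphash over the
 * expectation, its helper, its master and its tuple, so that raw kernel
 * pointer values are not exposed to userspace.
 */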
2733 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
2734 {
2735 	static __read_mostly siphash_key_t exp_id_seed;
2736 	unsigned long a, b, c, d;
2737 
2738 	net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
2739 
2740 	a = (unsigned long)exp;
2741 	b = (unsigned long)exp->helper;
2742 	c = (unsigned long)exp->master;
2743 	d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
2744 
2745 #ifdef CONFIG_64BIT
2746 	return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
2747 #else
2748 	return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
2749 #endif
2750 }
2751 
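/* Dump one expectation: tuple, mask, master tuple, optional NAT
 * information, timeout, ID, flags, class, helper name and expectfn.
 */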
2752 static int
2753 ctnetlink_exp_dump_expect(struct sk_buff *skb,
2754 			  const struct nf_conntrack_expect *exp)
2755 {
2756 	struct nf_conn *master = exp->master;
2757 	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2758 	struct nf_conn_help *help;
2759 #if IS_ENABLED(CONFIG_NF_NAT)
2760 	struct nlattr *nest_parms;
2761 	struct nf_conntrack_tuple nat_tuple = {};
2762 #endif
2763 	struct nf_ct_helper_expectfn *expfn;
2764 
2765 	if (timeout < 0)
2766 		timeout = 0;
2767 
2768 	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2769 		goto nla_put_failure;
2770 	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2771 		goto nla_put_failure;
2772 	if (ctnetlink_exp_dump_tuple(skb,
2773 				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2774 				 CTA_EXPECT_MASTER) < 0)
2775 		goto nla_put_failure;
2776 
2777 #if IS_ENABLED(CONFIG_NF_NAT)
2778 	if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2779 	    exp->saved_proto.all) {
2780 		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
2781 		if (!nest_parms)
2782 			goto nla_put_failure;
2783 
2784 		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2785 			goto nla_put_failure;
2786 
2787 		nat_tuple.src.l3num = nf_ct_l3num(master);
2788 		nat_tuple.src.u3 = exp->saved_addr;
2789 		nat_tuple.dst.protonum = nf_ct_protonum(master);
2790 		nat_tuple.src.u = exp->saved_proto;
2791 
2792 		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2793 						CTA_EXPECT_NAT_TUPLE) < 0)
2794 			goto nla_put_failure;
2795 		nla_nest_end(skb, nest_parms);
2796 	}
2797 #endif
2798 	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2799 	    nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
2800 	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2801 	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2802 		goto nla_put_failure;
2803 	help = nfct_help(master);
2804 	if (help) {
2805 		struct nf_conntrack_helper *helper;
2806 
2807 		helper = rcu_dereference(help->helper);
2808 		if (helper &&
2809 		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2810 			goto nla_put_failure;
2811 	}
2812 	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2813 	if (expfn != NULL &&
2814 	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2815 		goto nla_put_failure;
2816 
2817 	return 0;
2818 
2819 nla_put_failure:
2820 	return -1;
2821 }
2822 
2823 static int
2824 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2825 			int event, const struct nf_conntrack_expect *exp)
2826 {
2827 	struct nlmsghdr *nlh;
2828 	struct nfgenmsg *nfmsg;
2829 	unsigned int flags = portid ? NLM_F_MULTI : 0;
2830 
2831 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
2832 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2833 	if (nlh == NULL)
2834 		goto nlmsg_failure;
2835 
2836 	nfmsg = nlmsg_data(nlh);
2837 	nfmsg->nfgen_family = exp->tuple.src.l3num;
2838 	nfmsg->version	    = NFNETLINK_V0;
2839 	nfmsg->res_id	    = 0;
2840 
2841 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2842 		goto nla_put_failure;
2843 
2844 	nlmsg_end(skb, nlh);
2845 	return skb->len;
2846 
2847 nlmsg_failure:
2848 nla_put_failure:
2849 	nlmsg_cancel(skb, nlh);
2850 	return -1;
2851 }
2852 
2853 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2854 static int
2855 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2856 {
2857 	struct nf_conntrack_expect *exp = item->exp;
2858 	struct net *net = nf_ct_exp_net(exp);
2859 	struct nlmsghdr *nlh;
2860 	struct nfgenmsg *nfmsg;
2861 	struct sk_buff *skb;
2862 	unsigned int type, group;
2863 	int flags = 0;
2864 
2865 	if (events & (1 << IPEXP_DESTROY)) {
2866 		type = IPCTNL_MSG_EXP_DELETE;
2867 		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2868 	} else if (events & (1 << IPEXP_NEW)) {
2869 		type = IPCTNL_MSG_EXP_NEW;
2870 		flags = NLM_F_CREATE|NLM_F_EXCL;
2871 		group = NFNLGRP_CONNTRACK_EXP_NEW;
2872 	} else
2873 		return 0;
2874 
2875 	if (!item->report && !nfnetlink_has_listeners(net, group))
2876 		return 0;
2877 
2878 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2879 	if (skb == NULL)
2880 		goto errout;
2881 
2882 	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
2883 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2884 	if (nlh == NULL)
2885 		goto nlmsg_failure;
2886 
2887 	nfmsg = nlmsg_data(nlh);
2888 	nfmsg->nfgen_family = exp->tuple.src.l3num;
2889 	nfmsg->version	    = NFNETLINK_V0;
2890 	nfmsg->res_id	    = 0;
2891 
2892 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2893 		goto nla_put_failure;
2894 
2895 	nlmsg_end(skb, nlh);
2896 	nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2897 	return 0;
2898 
2899 nla_put_failure:
2900 	nlmsg_cancel(skb, nlh);
2901 nlmsg_failure:
2902 	kfree_skb(skb);
2903 errout:
2904 	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2905 	return 0;
2906 }
2907 #endif
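
/* Dump ->done callback: drop the reference stashed in cb->args[1] if the
 * dump was interrupted mid-table.
 */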
2908 static int ctnetlink_exp_done(struct netlink_callback *cb)
2909 {
2910 	if (cb->args[1])
2911 		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2912 	return 0;
2913 }
2914 
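/* Walk the global expectation hash under RCU.  When the skb fills up,
 * keep a reference to the current expectation in cb->args[1] so the next
 * invocation can resume from the same entry.
 */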
2915 static int
2916 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2917 {
2918 	struct net *net = sock_net(skb->sk);
2919 	struct nf_conntrack_expect *exp, *last;
2920 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2921 	u_int8_t l3proto = nfmsg->nfgen_family;
2922 
2923 	rcu_read_lock();
2924 	last = (struct nf_conntrack_expect *)cb->args[1];
2925 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2926 restart:
2927 		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
2928 					 hnode) {
2929 			if (l3proto && exp->tuple.src.l3num != l3proto)
2930 				continue;
2931 
2932 			if (!net_eq(nf_ct_net(exp->master), net))
2933 				continue;
2934 
2935 			if (cb->args[1]) {
2936 				if (exp != last)
2937 					continue;
2938 				cb->args[1] = 0;
2939 			}
2940 			if (ctnetlink_exp_fill_info(skb,
2941 						    NETLINK_CB(cb->skb).portid,
2942 						    cb->nlh->nlmsg_seq,
2943 						    IPCTNL_MSG_EXP_NEW,
2944 						    exp) < 0) {
2945 				if (!refcount_inc_not_zero(&exp->use))
2946 					continue;
2947 				cb->args[1] = (unsigned long)exp;
2948 				goto out;
2949 			}
2950 		}
2951 		if (cb->args[1]) {
2952 			cb->args[1] = 0;
2953 			goto restart;
2954 		}
2955 	}
2956 out:
2957 	rcu_read_unlock();
2958 	if (last)
2959 		nf_ct_expect_put(last);
2960 
2961 	return skb->len;
2962 }
2963 
2964 static int
2965 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2966 {
2967 	struct nf_conntrack_expect *exp, *last;
2968 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2969 	struct nf_conn *ct = cb->data;
2970 	struct nf_conn_help *help = nfct_help(ct);
2971 	u_int8_t l3proto = nfmsg->nfgen_family;
2972 
2973 	if (cb->args[0])
2974 		return 0;
2975 
2976 	rcu_read_lock();
2977 	last = (struct nf_conntrack_expect *)cb->args[1];
2978 restart:
2979 	hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
2980 		if (l3proto && exp->tuple.src.l3num != l3proto)
2981 			continue;
2982 		if (cb->args[1]) {
2983 			if (exp != last)
2984 				continue;
2985 			cb->args[1] = 0;
2986 		}
2987 		if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2988 					    cb->nlh->nlmsg_seq,
2989 					    IPCTNL_MSG_EXP_NEW,
2990 					    exp) < 0) {
2991 			if (!refcount_inc_not_zero(&exp->use))
2992 				continue;
2993 			cb->args[1] = (unsigned long)exp;
2994 			goto out;
2995 		}
2996 	}
2997 	if (cb->args[1]) {
2998 		cb->args[1] = 0;
2999 		goto restart;
3000 	}
3001 	cb->args[0] = 1;
3002 out:
3003 	rcu_read_unlock();
3004 	if (last)
3005 		nf_ct_expect_put(last);
3006 
3007 	return skb->len;
3008 }
3009 
3010 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
3011 				 struct sk_buff *skb,
3012 				 const struct nlmsghdr *nlh,
3013 				 const struct nlattr * const cda[],
3014 				 struct netlink_ext_ack *extack)
3015 {
3016 	int err;
3017 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3018 	u_int8_t u3 = nfmsg->nfgen_family;
3019 	struct nf_conntrack_tuple tuple;
3020 	struct nf_conntrack_tuple_hash *h;
3021 	struct nf_conn *ct;
3022 	struct nf_conntrack_zone zone;
3023 	struct netlink_dump_control c = {
3024 		.dump = ctnetlink_exp_ct_dump_table,
3025 		.done = ctnetlink_exp_done,
3026 	};
3027 
3028 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3029 				    u3, NULL);
3030 	if (err < 0)
3031 		return err;
3032 
3033 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3034 	if (err < 0)
3035 		return err;
3036 
3037 	h = nf_conntrack_find_get(net, &zone, &tuple);
3038 	if (!h)
3039 		return -ENOENT;
3040 
3041 	ct = nf_ct_tuplehash_to_ctrack(h);
3042 	/* No expectations are linked to this conntrack entry. */
3043 	if (!nfct_help(ct)) {
3044 		nf_ct_put(ct);
3045 		return 0;
3046 	}
3047 
3048 	c.data = ct;
3049 
3050 	err = netlink_dump_start(ctnl, skb, nlh, &c);
3051 	nf_ct_put(ct);
3052 
3053 	return err;
3054 }
3055 
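/* IPCTNL_MSG_EXP_GET handler: with NLM_F_DUMP, dump the whole table (or
 * only the expectations of the master given in CTA_EXPECT_MASTER);
 * otherwise look up and return a single expectation.
 */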
3056 static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
3057 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3058 				const struct nlattr * const cda[],
3059 				struct netlink_ext_ack *extack)
3060 {
3061 	struct nf_conntrack_tuple tuple;
3062 	struct nf_conntrack_expect *exp;
3063 	struct sk_buff *skb2;
3064 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3065 	u_int8_t u3 = nfmsg->nfgen_family;
3066 	struct nf_conntrack_zone zone;
3067 	int err;
3068 
3069 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3070 		if (cda[CTA_EXPECT_MASTER])
3071 			return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda,
3072 						     extack);
3073 		else {
3074 			struct netlink_dump_control c = {
3075 				.dump = ctnetlink_exp_dump_table,
3076 				.done = ctnetlink_exp_done,
3077 			};
3078 			return netlink_dump_start(ctnl, skb, nlh, &c);
3079 		}
3080 	}
3081 
3082 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3083 	if (err < 0)
3084 		return err;
3085 
3086 	if (cda[CTA_EXPECT_TUPLE])
3087 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3088 					    u3, NULL);
3089 	else if (cda[CTA_EXPECT_MASTER])
3090 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3091 					    u3, NULL);
3092 	else
3093 		return -EINVAL;
3094 
3095 	if (err < 0)
3096 		return err;
3097 
3098 	exp = nf_ct_expect_find_get(net, &zone, &tuple);
3099 	if (!exp)
3100 		return -ENOENT;
3101 
3102 	if (cda[CTA_EXPECT_ID]) {
3103 		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3104 
3105 		if (id != nf_expect_get_id(exp)) {
3106 			nf_ct_expect_put(exp);
3107 			return -ENOENT;
3108 		}
3109 	}
3110 
3111 	err = -ENOMEM;
3112 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3113 	if (skb2 == NULL) {
3114 		nf_ct_expect_put(exp);
3115 		goto out;
3116 	}
3117 
3118 	rcu_read_lock();
3119 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
3120 				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
3121 	rcu_read_unlock();
3122 	nf_ct_expect_put(exp);
3123 	if (err <= 0)
3124 		goto free;
3125 
3126 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
3127 	if (err < 0)
3128 		goto out;
3129 
3130 	return 0;
3131 
3132 free:
3133 	kfree_skb(skb2);
3134 out:
3135 	/* this avoids a loop in nfnetlink. */
3136 	return err == -EAGAIN ? -ENOBUFS : err;
3137 }
3138 
3139 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
3140 {
3141 	const struct nf_conn_help *m_help;
3142 	const char *name = data;
3143 
3144 	m_help = nfct_help(exp->master);
3145 
3146 	return strcmp(m_help->helper->name, name) == 0;
3147 }
3148 
3149 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
3150 {
3151 	return true;
3152 }
3153 
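/* IPCTNL_MSG_EXP_DELETE handler: remove a single expectation selected by
 * tuple (and optional ID), all expectations whose master conntrack uses a
 * named helper, or flush every expectation in the net namespace.
 */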
3154 static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
3155 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3156 				const struct nlattr * const cda[],
3157 				struct netlink_ext_ack *extack)
3158 {
3159 	struct nf_conntrack_expect *exp;
3160 	struct nf_conntrack_tuple tuple;
3161 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3162 	u_int8_t u3 = nfmsg->nfgen_family;
3163 	struct nf_conntrack_zone zone;
3164 	int err;
3165 
3166 	if (cda[CTA_EXPECT_TUPLE]) {
3167 		/* delete a single expect by tuple */
3168 		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3169 		if (err < 0)
3170 			return err;
3171 
3172 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3173 					    u3, NULL);
3174 		if (err < 0)
3175 			return err;
3176 
3177 		/* bump usage count to 2 */
3178 		exp = nf_ct_expect_find_get(net, &zone, &tuple);
3179 		if (!exp)
3180 			return -ENOENT;
3181 
3182 		if (cda[CTA_EXPECT_ID]) {
3183 			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3184 			if (ntohl(id) != (u32)(unsigned long)exp) {
3185 				nf_ct_expect_put(exp);
3186 				return -ENOENT;
3187 			}
3188 		}
3189 
3190 		/* after list removal, usage count == 1 */
3191 		spin_lock_bh(&nf_conntrack_expect_lock);
3192 		if (del_timer(&exp->timeout)) {
3193 			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
3194 						   nlmsg_report(nlh));
3195 			nf_ct_expect_put(exp);
3196 		}
3197 		spin_unlock_bh(&nf_conntrack_expect_lock);
3198 		/* drop the reference taken by the 'get' above;
3199 		 * after this line usage count == 0 */
3200 		nf_ct_expect_put(exp);
3201 	} else if (cda[CTA_EXPECT_HELP_NAME]) {
3202 		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3203 
3204 		nf_ct_expect_iterate_net(net, expect_iter_name, name,
3205 					 NETLINK_CB(skb).portid,
3206 					 nlmsg_report(nlh));
3207 	} else {
3208 		/* This basically means we have to flush everything. */
3209 		nf_ct_expect_iterate_net(net, expect_iter_all, NULL,
3210 					 NETLINK_CB(skb).portid,
3211 					 nlmsg_report(nlh));
3212 	}
3213 
3214 	return 0;
3215 }
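
/* Only the timeout of an existing expectation can be changed; if its
 * timer has already expired, -ETIME is returned.
 */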
3216 static int
3217 ctnetlink_change_expect(struct nf_conntrack_expect *x,
3218 			const struct nlattr * const cda[])
3219 {
3220 	if (cda[CTA_EXPECT_TIMEOUT]) {
3221 		if (!del_timer(&x->timeout))
3222 			return -ETIME;
3223 
3224 		x->timeout.expires = jiffies +
3225 			ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
3226 		add_timer(&x->timeout);
3227 	}
3228 	return 0;
3229 }
3230 
3231 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
3232 	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
3233 	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
3234 };
3235 
3236 static int
3237 ctnetlink_parse_expect_nat(const struct nlattr *attr,
3238 			   struct nf_conntrack_expect *exp,
3239 			   u_int8_t u3)
3240 {
3241 #if IS_ENABLED(CONFIG_NF_NAT)
3242 	struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
3243 	struct nf_conntrack_tuple nat_tuple = {};
3244 	int err;
3245 
3246 	err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
3247 					  exp_nat_nla_policy, NULL);
3248 	if (err < 0)
3249 		return err;
3250 
3251 	if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
3252 		return -EINVAL;
3253 
3254 	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
3255 				    &nat_tuple, CTA_EXPECT_NAT_TUPLE,
3256 				    u3, NULL);
3257 	if (err < 0)
3258 		return err;
3259 
3260 	exp->saved_addr = nat_tuple.src.u3;
3261 	exp->saved_proto = nat_tuple.src.u;
3262 	exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
3263 
3264 	return 0;
3265 #else
3266 	return -EOPNOTSUPP;
3267 #endif
3268 }
3269 
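/* Allocate and fill an expectation from the CTA_EXPECT_* attributes.
 * The master conntrack must have a helper extension; the
 * NF_CT_EXPECT_USERSPACE flag is cleared from any user-supplied flags.
 */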
3270 static struct nf_conntrack_expect *
3271 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
3272 		       struct nf_conntrack_helper *helper,
3273 		       struct nf_conntrack_tuple *tuple,
3274 		       struct nf_conntrack_tuple *mask)
3275 {
3276 	u_int32_t class = 0;
3277 	struct nf_conntrack_expect *exp;
3278 	struct nf_conn_help *help;
3279 	int err;
3280 
3281 	help = nfct_help(ct);
3282 	if (!help)
3283 		return ERR_PTR(-EOPNOTSUPP);
3284 
3285 	if (cda[CTA_EXPECT_CLASS] && helper) {
3286 		class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
3287 		if (class > helper->expect_class_max)
3288 			return ERR_PTR(-EINVAL);
3289 	}
3290 	exp = nf_ct_expect_alloc(ct);
3291 	if (!exp)
3292 		return ERR_PTR(-ENOMEM);
3293 
3294 	if (cda[CTA_EXPECT_FLAGS]) {
3295 		exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
3296 		exp->flags &= ~NF_CT_EXPECT_USERSPACE;
3297 	} else {
3298 		exp->flags = 0;
3299 	}
3300 	if (cda[CTA_EXPECT_FN]) {
3301 		const char *name = nla_data(cda[CTA_EXPECT_FN]);
3302 		struct nf_ct_helper_expectfn *expfn;
3303 
3304 		expfn = nf_ct_helper_expectfn_find_by_name(name);
3305 		if (expfn == NULL) {
3306 			err = -EINVAL;
3307 			goto err_out;
3308 		}
3309 		exp->expectfn = expfn->expectfn;
3310 	} else
3311 		exp->expectfn = NULL;
3312 
3313 	exp->class = class;
3314 	exp->master = ct;
3315 	exp->helper = helper;
3316 	exp->tuple = *tuple;
3317 	exp->mask.src.u3 = mask->src.u3;
3318 	exp->mask.src.u.all = mask->src.u.all;
3319 
3320 	if (cda[CTA_EXPECT_NAT]) {
3321 		err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
3322 						 exp, nf_ct_l3num(ct));
3323 		if (err < 0)
3324 			goto err_out;
3325 	}
3326 	return exp;
3327 err_out:
3328 	nf_ct_expect_put(exp);
3329 	return ERR_PTR(err);
3330 }
3331 
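/* Create a new expectation: parse tuple, mask and master tuple, find the
 * master conntrack, optionally resolve (and autoload) the helper named in
 * CTA_EXPECT_HELP_NAME, then register the expectation.
 */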
3332 static int
3333 ctnetlink_create_expect(struct net *net,
3334 			const struct nf_conntrack_zone *zone,
3335 			const struct nlattr * const cda[],
3336 			u_int8_t u3, u32 portid, int report)
3337 {
3338 	struct nf_conntrack_tuple tuple, mask, master_tuple;
3339 	struct nf_conntrack_tuple_hash *h = NULL;
3340 	struct nf_conntrack_helper *helper = NULL;
3341 	struct nf_conntrack_expect *exp;
3342 	struct nf_conn *ct;
3343 	int err;
3344 
3345 	/* caller guarantees that those three CTA_EXPECT_* exist */
3346 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3347 				    u3, NULL);
3348 	if (err < 0)
3349 		return err;
3350 	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
3351 				    u3, NULL);
3352 	if (err < 0)
3353 		return err;
3354 	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
3355 				    u3, NULL);
3356 	if (err < 0)
3357 		return err;
3358 
3359 	/* Look for master conntrack of this expectation */
3360 	h = nf_conntrack_find_get(net, zone, &master_tuple);
3361 	if (!h)
3362 		return -ENOENT;
3363 	ct = nf_ct_tuplehash_to_ctrack(h);
3364 
3365 	rcu_read_lock();
3366 	if (cda[CTA_EXPECT_HELP_NAME]) {
3367 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3368 
3369 		helper = __nf_conntrack_helper_find(helpname, u3,
3370 						    nf_ct_protonum(ct));
3371 		if (helper == NULL) {
3372 			rcu_read_unlock();
3373 #ifdef CONFIG_MODULES
3374 			if (request_module("nfct-helper-%s", helpname) < 0) {
3375 				err = -EOPNOTSUPP;
3376 				goto err_ct;
3377 			}
3378 			rcu_read_lock();
3379 			helper = __nf_conntrack_helper_find(helpname, u3,
3380 							    nf_ct_protonum(ct));
3381 			if (helper) {
3382 				err = -EAGAIN;
3383 				goto err_rcu;
3384 			}
3385 			rcu_read_unlock();
3386 #endif
3387 			err = -EOPNOTSUPP;
3388 			goto err_ct;
3389 		}
3390 	}
3391 
3392 	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3393 	if (IS_ERR(exp)) {
3394 		err = PTR_ERR(exp);
3395 		goto err_rcu;
3396 	}
3397 
3398 	err = nf_ct_expect_related_report(exp, portid, report, 0);
3399 	nf_ct_expect_put(exp);
3400 err_rcu:
3401 	rcu_read_unlock();
3402 err_ct:
3403 	nf_ct_put(ct);
3404 	return err;
3405 }
3406 
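/* IPCTNL_MSG_EXP_NEW handler: create the expectation if it does not exist
 * and NLM_F_CREATE is set; otherwise update the existing one, unless
 * NLM_F_EXCL was given, in which case -EEXIST is returned.
 */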
3407 static int ctnetlink_new_expect(struct net *net, struct sock *ctnl,
3408 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3409 				const struct nlattr * const cda[],
3410 				struct netlink_ext_ack *extack)
3411 {
3412 	struct nf_conntrack_tuple tuple;
3413 	struct nf_conntrack_expect *exp;
3414 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3415 	u_int8_t u3 = nfmsg->nfgen_family;
3416 	struct nf_conntrack_zone zone;
3417 	int err;
3418 
3419 	if (!cda[CTA_EXPECT_TUPLE] ||
3420 	    !cda[CTA_EXPECT_MASK] ||
3421 	    !cda[CTA_EXPECT_MASTER])
3422 		return -EINVAL;
3423 
3424 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3425 	if (err < 0)
3426 		return err;
3427 
3428 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3429 				    u3, NULL);
3430 	if (err < 0)
3431 		return err;
3432 
3433 	spin_lock_bh(&nf_conntrack_expect_lock);
3434 	exp = __nf_ct_expect_find(net, &zone, &tuple);
3435 	if (!exp) {
3436 		spin_unlock_bh(&nf_conntrack_expect_lock);
3437 		err = -ENOENT;
3438 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
3439 			err = ctnetlink_create_expect(net, &zone, cda, u3,
3440 						      NETLINK_CB(skb).portid,
3441 						      nlmsg_report(nlh));
3442 		}
3443 		return err;
3444 	}
3445 
3446 	err = -EEXIST;
3447 	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
3448 		err = ctnetlink_change_expect(exp, cda);
3449 	spin_unlock_bh(&nf_conntrack_expect_lock);
3450 
3451 	return err;
3452 }
3453 
3454 static int
3455 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3456 			     const struct ip_conntrack_stat *st)
3457 {
3458 	struct nlmsghdr *nlh;
3459 	struct nfgenmsg *nfmsg;
3460 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3461 
3462 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
3463 			      IPCTNL_MSG_EXP_GET_STATS_CPU);
3464 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3465 	if (nlh == NULL)
3466 		goto nlmsg_failure;
3467 
3468 	nfmsg = nlmsg_data(nlh);
3469 	nfmsg->nfgen_family = AF_UNSPEC;
3470 	nfmsg->version      = NFNETLINK_V0;
3471 	nfmsg->res_id	    = htons(cpu);
3472 
3473 	if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3474 	    nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3475 	    nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3476 		goto nla_put_failure;
3477 
3478 	nlmsg_end(skb, nlh);
3479 	return skb->len;
3480 
3481 nla_put_failure:
3482 nlmsg_failure:
3483 	nlmsg_cancel(skb, nlh);
3484 	return -1;
3485 }
3486 
3487 static int
3488 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3489 {
3490 	int cpu;
3491 	struct net *net = sock_net(skb->sk);
3492 
3493 	if (cb->args[0] == nr_cpu_ids)
3494 		return 0;
3495 
3496 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3497 		const struct ip_conntrack_stat *st;
3498 
3499 		if (!cpu_possible(cpu))
3500 			continue;
3501 
3502 		st = per_cpu_ptr(net->ct.stat, cpu);
3503 		if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3504 						 cb->nlh->nlmsg_seq,
3505 						 cpu, st) < 0)
3506 			break;
3507 	}
3508 	cb->args[0] = cpu;
3509 
3510 	return skb->len;
3511 }
3512 
3513 static int ctnetlink_stat_exp_cpu(struct net *net, struct sock *ctnl,
3514 				  struct sk_buff *skb,
3515 				  const struct nlmsghdr *nlh,
3516 				  const struct nlattr * const cda[],
3517 				  struct netlink_ext_ack *extack)
3518 {
3519 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3520 		struct netlink_dump_control c = {
3521 			.dump = ctnetlink_exp_stat_cpu_dump,
3522 		};
3523 		return netlink_dump_start(ctnl, skb, nlh, &c);
3524 	}
3525 
3526 	return 0;
3527 }
3528 
3529 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3530 static struct nf_ct_event_notifier ctnl_notifier = {
3531 	.fcn = ctnetlink_conntrack_event,
3532 };
3533 
3534 static struct nf_exp_event_notifier ctnl_notifier_exp = {
3535 	.fcn = ctnetlink_expect_event,
3536 };
3537 #endif
3538 
3539 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3540 	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
3541 					    .attr_count = CTA_MAX,
3542 					    .policy = ct_nla_policy },
3543 	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
3544 					    .attr_count = CTA_MAX,
3545 					    .policy = ct_nla_policy },
3546 	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
3547 					    .attr_count = CTA_MAX,
3548 					    .policy = ct_nla_policy },
3549 	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
3550 					    .attr_count = CTA_MAX,
3551 					    .policy = ct_nla_policy },
3552 	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
3553 	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
3554 	[IPCTNL_MSG_CT_GET_DYING]	= { .call = ctnetlink_get_ct_dying },
3555 	[IPCTNL_MSG_CT_GET_UNCONFIRMED]	= { .call = ctnetlink_get_ct_unconfirmed },
3556 };
3557 
3558 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3559 	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
3560 					    .attr_count = CTA_EXPECT_MAX,
3561 					    .policy = exp_nla_policy },
3562 	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
3563 					    .attr_count = CTA_EXPECT_MAX,
3564 					    .policy = exp_nla_policy },
3565 	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
3566 					    .attr_count = CTA_EXPECT_MAX,
3567 					    .policy = exp_nla_policy },
3568 	[IPCTNL_MSG_EXP_GET_STATS_CPU]	= { .call = ctnetlink_stat_exp_cpu },
3569 };
3570 
3571 static const struct nfnetlink_subsystem ctnl_subsys = {
3572 	.name				= "conntrack",
3573 	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
3574 	.cb_count			= IPCTNL_MSG_MAX,
3575 	.cb				= ctnl_cb,
3576 };
3577 
3578 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3579 	.name				= "conntrack_expect",
3580 	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
3581 	.cb_count			= IPCTNL_MSG_EXP_MAX,
3582 	.cb				= ctnl_exp_cb,
3583 };
3584 
3585 MODULE_ALIAS("ip_conntrack_netlink");
3586 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3587 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3588 
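/* Per-netns setup: register the conntrack and expectation event
 * notifiers when CONFIG_NF_CONNTRACK_EVENTS is enabled.
 */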
3589 static int __net_init ctnetlink_net_init(struct net *net)
3590 {
3591 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3592 	int ret;
3593 
3594 	ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3595 	if (ret < 0) {
3596 		pr_err("ctnetlink_init: cannot register notifier.\n");
3597 		goto err_out;
3598 	}
3599 
3600 	ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3601 	if (ret < 0) {
3602 		pr_err("ctnetlink_init: cannot register expect notifier.\n");
3603 		goto err_unreg_notifier;
3604 	}
3605 #endif
3606 	return 0;
3607 
3608 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3609 err_unreg_notifier:
3610 	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3611 err_out:
3612 	return ret;
3613 #endif
3614 }
3615 
3616 static void ctnetlink_net_exit(struct net *net)
3617 {
3618 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3619 	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3620 	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3621 #endif
3622 }
3623 
3624 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3625 {
3626 	struct net *net;
3627 
3628 	list_for_each_entry(net, net_exit_list, exit_list)
3629 		ctnetlink_net_exit(net);
3630 
3631 	/* wait until other CPUs are done with the ctnl_notifiers */
3632 	synchronize_rcu();
3633 }
3634 
3635 static struct pernet_operations ctnetlink_net_ops = {
3636 	.init		= ctnetlink_net_init,
3637 	.exit_batch	= ctnetlink_net_exit_batch,
3638 };
3639 
3640 static int __init ctnetlink_init(void)
3641 {
3642 	int ret;
3643 
3644 	ret = nfnetlink_subsys_register(&ctnl_subsys);
3645 	if (ret < 0) {
3646 		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3647 		goto err_out;
3648 	}
3649 
3650 	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3651 	if (ret < 0) {
3652 		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3653 		goto err_unreg_subsys;
3654 	}
3655 
3656 	ret = register_pernet_subsys(&ctnetlink_net_ops);
3657 	if (ret < 0) {
3658 		pr_err("ctnetlink_init: cannot register pernet operations\n");
3659 		goto err_unreg_exp_subsys;
3660 	}
3661 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3662 	/* setup interaction between nf_queue and nf_conntrack_netlink. */
3663 	RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
3664 #endif
3665 	return 0;
3666 
3667 err_unreg_exp_subsys:
3668 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3669 err_unreg_subsys:
3670 	nfnetlink_subsys_unregister(&ctnl_subsys);
3671 err_out:
3672 	return ret;
3673 }
3674 
3675 static void __exit ctnetlink_exit(void)
3676 {
3677 	unregister_pernet_subsys(&ctnetlink_net_ops);
3678 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3679 	nfnetlink_subsys_unregister(&ctnl_subsys);
3680 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3681 	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3682 #endif
3683 	synchronize_rcu();
3684 }
3685 
3686 module_init(ctnetlink_init);
3687 module_exit(ctnetlink_exit);
3688