xref: /openbmc/linux/net/netfilter/nfnetlink.c (revision e0d07278)
/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <linux/sched/signal.h>

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
MODULE_DESCRIPTION("Netfilter messages via netlink socket");

#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

#define NFNL_MAX_ATTR_COUNT	32

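/*
 * One slot per nfnetlink subsystem. Writers (subsystem registration and
 * unregistration) take the per-subsystem mutex; readers on the receive
 * path dereference ->subsys under RCU, or under the mutex via
 * nfnl_dereference_protected().
 */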
static struct {
	struct mutex				mutex;
	const struct nfnetlink_subsystem __rcu	*subsys;
} table[NFNL_SUBSYS_COUNT];

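/*
 * Map each nfnetlink multicast group to the subsystem that owns it, so
 * nfnetlink_bind() can autoload the right module when userspace
 * subscribes to a group before that subsystem has registered.
 */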
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW]		= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE]		= NFNL_SUBSYS_NFTABLES,
};

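/*
 * The per-subsystem mutex serializes subsystem (un)registration, the
 * non-RCU ->call handlers and the batch setup path; ->call_rcu handlers
 * run under rcu_read_lock() instead. lockdep_nfnl_is_held() lets
 * RCU-protected accessors assert that the mutex is held where required.
 */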
void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

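/*
 * nfnetlink_subsys_register - attach a subsystem to the nfnetlink transport
 *
 * Illustrative sketch only (the "foo" identifiers below are hypothetical,
 * and real subsystems may set further fields such as the batch hooks): a
 * subsystem provides a callback table indexed by message type and
 * registers it, typically from its module init routine:
 *
 *	static const struct nfnl_callback foo_cb[FOO_MSG_MAX] = {
 *		[FOO_MSG_GET] = {
 *			.call		= foo_get,
 *			.attr_count	= FOO_ATTR_MAX,
 *			.policy		= foo_policy,
 *		},
 *	};
 *
 *	static const struct nfnetlink_subsystem foo_subsys = {
 *		.subsys_id	= NFNL_SUBSYS_FOO,
 *		.cb_count	= FOO_MSG_MAX,
 *		.cb		= foo_cb,
 *	};
 *
 *	err = nfnetlink_subsys_register(&foo_subsys);
 *
 * Each callback's attr_count must not exceed NFNL_MAX_ATTR_COUNT, since
 * the receive path parses attributes into an on-stack array of that size.
 */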
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	u8 cb_id;

	/* Sanity-check attr_count size to avoid stack buffer overflow. */
	for (cb_id = 0; cb_id < n->cb_count; cb_id++)
		if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
			return -EINVAL;

	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

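/*
 * nfnetlink message types carry the subsystem id in the high byte and the
 * per-subsystem message type in the low byte: NFNL_SUBSYS_ID(type) selects
 * the table[] slot, NFNL_MSG_TYPE(type) indexes that subsystem's ->cb[]
 * array. nfnetlink_get_subsys() must run under rcu_read_lock().
 */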
static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

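/*
 * Thin wrappers around the per-network-namespace nfnetlink kernel socket
 * (net->nfnl), letting subsystems check for listeners, send multicast
 * notifications, report errors to multicast subscribers and unicast
 * replies without touching the socket directly.
 */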
int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	return netlink_set_err(net->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
		      int flags)
{
	return netlink_unicast(net->nfnl, skb, portid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);

		/* Sanity-check NFNL_MAX_ATTR_COUNT */
		if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
					   attr, attrlen,
					   ss->cb[cb_id].policy, extack);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (nc->call_rcu) {
			err = nc->call_rcu(net, net->nfnl, skb, nlh,
					   (const struct nlattr **)cda,
					   extack);
			rcu_read_unlock();
		} else {
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc)
				err = -EAGAIN;
			else if (nc->call)
				err = nc->call(net, net->nfnl, skb, nlh,
					       (const struct nlattr **)cda,
					       extack);
			else
				err = -EINVAL;
			nfnl_unlock(subsys_id);
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}

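/*
 * Errors hit while walking a batch are queued on a list and only acked
 * back to userspace once the whole batch has been processed, so that a
 * replayed batch does not report the same error twice.
 */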
struct nfnl_err {
	struct list_head	head;
	struct nlmsghdr		*nlh;
	int			err;
	struct netlink_ext_ack	extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
			const struct netlink_ext_ack *extack)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	nfnl_err->extack = *extack;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
			    &nfnl_err->extack);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE	= (1 << 0),
	NFNL_BATCH_DONE		= (1 << 1),
	NFNL_BATCH_REPLAY	= (1 << 2),
};

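/*
 * Process a transaction batch. On the wire a batch looks like:
 *
 *	NFNL_MSG_BATCH_BEGIN	(nfgenmsg.res_id = subsystem, optional
 *				 NFNL_BATCH_GENID attribute)
 *	<one or more messages for that subsystem>
 *	NFNL_MSG_BATCH_END
 *
 * Processing works on a clone of the original skb: each message is fed to
 * the subsystem's ->call_batch handler, NFNL_MSG_BATCH_END commits the
 * batch via ->commit(), and every other outcome aborts it via ->abort().
 * -EAGAIN from a handler or from ->commit() replays the whole batch from
 * the original skb.
 */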
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	struct netlink_ext_ack extack;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
	status = 0;

	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM, NULL);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
			return kfree_skb(skb);
		}
	}

	if (!ss->valid_genid || !ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!try_module_get(ss->owner)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!ss->valid_genid(net, genid)) {
		module_put(ss->owner);
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART, NULL);
		return kfree_skb(skb);
	}

	nfnl_unlock(subsys_id);

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		if (fatal_signal_pending(current)) {
			nfnl_err_reset(&err_list);
			err = -EINTR;
			status = NFNL_BATCH_FAILURE;
			goto done;
		}

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			int attrlen = nlh->nlmsg_len - min_len;
			/* Sanity-check NFNL_MAX_ATTR_COUNT */
			if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
				err = -ENOMEM;
				goto ack;
			}

			err = nla_parse_deprecated(cda,
						   ss->cb[cb_id].attr_count,
						   attr, attrlen,
						   ss->cb[cb_id].policy, NULL);
			if (err < 0)
				goto ack;

			if (nc->call_batch) {
				err = nc->call_batch(net, net->nfnl, skb, nlh,
						     (const struct nlattr **)cda,
						     &extack);
			}

			/* The lock was released to autoload some module;
			 * we have to abort and start from scratch using
			 * the original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto done;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed; this avoids reporting the same error
			 * several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
					    NULL);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, so
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb, true);
		nfnl_err_reset(&err_list);
		kfree_skb(skb);
		module_put(ss->owner);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		err = ss->commit(net, oskb);
		if (err == -EAGAIN) {
			status |= NFNL_BATCH_REPLAY;
			goto done;
		} else if (err) {
			ss->abort(net, oskb, false);
			netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
		}
	} else {
		ss->abort(net, oskb, false);
	}
	if (ss->cleanup)
		ss->cleanup(net);

	nfnl_err_deliver(&err_list, oskb);
	kfree_skb(skb);
	module_put(ss->owner);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID]	= { .type = NLA_U32 },
};

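/*
 * Parse the NFNL_MSG_BATCH_BEGIN message: its nfgenmsg res_id names the
 * target subsystem and an optional NFNL_BATCH_GENID attribute carries the
 * generation id that the subsystem validates via ->valid_genid().
 */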
static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse_deprecated(cda, NFNL_BATCH_MAX, attr, attrlen,
				   nfnl_batch_policy, NULL);
	if (err < 0) {
		netlink_ack(skb, nlh, err, NULL);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

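/*
 * Entry point for all messages arriving on the nfnetlink socket: reject
 * senders without CAP_NET_ADMIN, hand NFNL_MSG_BATCH_BEGIN to the batch
 * path, and let netlink_rcv_skb() dispatch everything else one message at
 * a time through nfnetlink_rcv_msg().
 */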
static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN ||
	    nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM, NULL);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}

#ifdef CONFIG_MODULES
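/*
 * Called when userspace joins an nfnetlink multicast group: if the
 * subsystem owning that group is not registered yet, request its module
 * asynchronously so notifications can start flowing. Binding itself is
 * never refused here.
 */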
static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module_nowait("nfnetlink-subsys-%d", type);
	return 0;
}
#endif

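/*
 * Create the per-namespace NETLINK_NETFILTER kernel socket. It is
 * published via RCU in net->nfnl for the receive and notification paths,
 * while net->nfnl_stash keeps a plain pointer for release at namespace
 * teardown.
 */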
static int __net_init nfnetlink_net_init(struct net *net)
{
	struct sock *nfnl;
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
#ifdef CONFIG_MODULES
		.bind	= nfnetlink_bind,
#endif
	};

	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnl)
		return -ENOMEM;
	net->nfnl_stash = nfnl;
	rcu_assign_pointer(net->nfnl, nfnl);
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->nfnl, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->nfnl_stash);
}

static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
};

static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		mutex_init(&table[i].mutex);

	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);