xref: /openbmc/linux/net/netlink/genetlink.c (revision 5d0e4d78)
1 /*
2  * NETLINK      Generic Netlink Family
3  *
4  * 		Authors:	Jamal Hadi Salim
5  * 				Thomas Graf <tgraf@suug.ch>
6  *				Johannes Berg <johannes@sipsolutions.net>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/types.h>
14 #include <linux/socket.h>
15 #include <linux/string.h>
16 #include <linux/skbuff.h>
17 #include <linux/mutex.h>
18 #include <linux/bitmap.h>
19 #include <linux/rwsem.h>
20 #include <linux/idr.h>
21 #include <net/sock.h>
22 #include <net/genetlink.h>
23 
24 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
25 static DECLARE_RWSEM(cb_lock);
26 
27 atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
28 DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
29 
30 void genl_lock(void)
31 {
32 	mutex_lock(&genl_mutex);
33 }
34 EXPORT_SYMBOL(genl_lock);
35 
36 void genl_unlock(void)
37 {
38 	mutex_unlock(&genl_mutex);
39 }
40 EXPORT_SYMBOL(genl_unlock);
41 
42 #ifdef CONFIG_LOCKDEP
43 bool lockdep_genl_is_held(void)
44 {
45 	return lockdep_is_held(&genl_mutex);
46 }
47 EXPORT_SYMBOL(lockdep_genl_is_held);
48 #endif
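
/*
 * Illustrative note (not part of the original file): lockdep_genl_is_held()
 * backs the genl_dereference() helper in <net/genetlink.h>, so handlers of
 * a family without parallel_ops (which run under genl_mutex, see below) can
 * dereference RCU pointers that are only updated under the genl lock, e.g.:
 *
 *	struct example_cfg *cfg = genl_dereference(example_cfg_ptr);
 *
 * where "example_cfg" and "example_cfg_ptr" are hypothetical names.
 */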
49 
50 static void genl_lock_all(void)
51 {
52 	down_write(&cb_lock);
53 	genl_lock();
54 }
55 
56 static void genl_unlock_all(void)
57 {
58 	genl_unlock();
59 	up_write(&cb_lock);
60 }
61 
62 static DEFINE_IDR(genl_fam_idr);
63 
64 /*
65  * Bitmap of multicast groups that are currently in use.
66  *
67  * To avoid an allocation of just one unsigned long at boot,
68  * declare it global instead.
69  * Bit 0 is marked as already used since group 0 is invalid.
70  * Bit 1 is marked as already used since the drop-monitor code
71  * abuses the API and thinks it can statically use group 1.
72  * That group will typically conflict with groups used by
73  * any proper users.
74  * Bit 16 is marked as used since it's used for generic netlink
75  * and the code no longer marks pre-reserved IDs as used.
76  * Bit 17 is marked as already used since the VFS quota code
77  * also abused this API and relied on family == group ID; we
78  * cater to that by giving it a static family and group ID.
79  * Bit 18 is marked as already used since the PMCRAID driver
80  * did the same thing as the VFS quota code (maybe copied?).
81  */
82 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
83 				      BIT(GENL_ID_VFS_DQUOT) |
84 				      BIT(GENL_ID_PMCRAID);
85 static unsigned long *mc_groups = &mc_group_start;
86 static unsigned long mc_groups_longs = 1;
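
/*
 * Worked example (illustrative, added for clarity): the initial value is
 * 0x3 | BIT(16) | BIT(17) | BIT(18) == 0x70003, i.e. bits 0, 1, 16, 17 and
 * 18 are taken.  The first family that asks for one dynamically assigned
 * group therefore gets group ID 2, the next one gets 3, and so on; IDs
 * 16-18 are only handed out to the special-cased families below.
 */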
87 
88 static int genl_ctrl_event(int event, const struct genl_family *family,
89 			   const struct genl_multicast_group *grp,
90 			   int grp_id);
91 
92 static const struct genl_family *genl_family_find_byid(unsigned int id)
93 {
94 	return idr_find(&genl_fam_idr, id);
95 }
96 
97 static const struct genl_family *genl_family_find_byname(char *name)
98 {
99 	const struct genl_family *family;
100 	unsigned int id;
101 
102 	idr_for_each_entry(&genl_fam_idr, family, id)
103 		if (strcmp(family->name, name) == 0)
104 			return family;
105 
106 	return NULL;
107 }
108 
109 static const struct genl_ops *genl_get_cmd(u8 cmd,
110 					   const struct genl_family *family)
111 {
112 	int i;
113 
114 	for (i = 0; i < family->n_ops; i++)
115 		if (family->ops[i].cmd == cmd)
116 			return &family->ops[i];
117 
118 	return NULL;
119 }
120 
121 static int genl_allocate_reserve_groups(int n_groups, int *first_id)
122 {
123 	unsigned long *new_groups;
124 	int start = 0;
125 	int i;
126 	int id;
127 	bool fits;
128 
129 	do {
130 		if (start == 0)
131 			id = find_first_zero_bit(mc_groups,
132 						 mc_groups_longs *
133 						 BITS_PER_LONG);
134 		else
135 			id = find_next_zero_bit(mc_groups,
136 						mc_groups_longs * BITS_PER_LONG,
137 						start);
138 
139 		fits = true;
140 		for (i = id;
141 		     i < min_t(int, id + n_groups,
142 			       mc_groups_longs * BITS_PER_LONG);
143 		     i++) {
144 			if (test_bit(i, mc_groups)) {
145 				start = i;
146 				fits = false;
147 				break;
148 			}
149 		}
150 
151 		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
152 			unsigned long new_longs = mc_groups_longs +
153 						  BITS_TO_LONGS(n_groups);
154 			size_t nlen = new_longs * sizeof(unsigned long);
155 
156 			if (mc_groups == &mc_group_start) {
157 				new_groups = kzalloc(nlen, GFP_KERNEL);
158 				if (!new_groups)
159 					return -ENOMEM;
160 				mc_groups = new_groups;
161 				*mc_groups = mc_group_start;
162 			} else {
163 				new_groups = krealloc(mc_groups, nlen,
164 						      GFP_KERNEL);
165 				if (!new_groups)
166 					return -ENOMEM;
167 				mc_groups = new_groups;
168 				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
169 					mc_groups[mc_groups_longs + i] = 0;
170 			}
171 			mc_groups_longs = new_longs;
172 		}
173 	} while (!fits);
174 
175 	for (i = id; i < id + n_groups; i++)
176 		set_bit(i, mc_groups);
177 	*first_id = id;
178 	return 0;
179 }
180 
181 static struct genl_family genl_ctrl;
182 
183 static int genl_validate_assign_mc_groups(struct genl_family *family)
184 {
185 	int first_id;
186 	int n_groups = family->n_mcgrps;
187 	int err = 0, i;
188 	bool groups_allocated = false;
189 
190 	if (!n_groups)
191 		return 0;
192 
193 	for (i = 0; i < n_groups; i++) {
194 		const struct genl_multicast_group *grp = &family->mcgrps[i];
195 
196 		if (WARN_ON(grp->name[0] == '\0'))
197 			return -EINVAL;
198 		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
199 			return -EINVAL;
200 	}
201 
202 	/* special-case our own group and hacks */
203 	if (family == &genl_ctrl) {
204 		first_id = GENL_ID_CTRL;
205 		BUG_ON(n_groups != 1);
206 	} else if (strcmp(family->name, "NET_DM") == 0) {
207 		first_id = 1;
208 		BUG_ON(n_groups != 1);
209 	} else if (family->id == GENL_ID_VFS_DQUOT) {
210 		first_id = GENL_ID_VFS_DQUOT;
211 		BUG_ON(n_groups != 1);
212 	} else if (family->id == GENL_ID_PMCRAID) {
213 		first_id = GENL_ID_PMCRAID;
214 		BUG_ON(n_groups != 1);
215 	} else {
216 		groups_allocated = true;
217 		err = genl_allocate_reserve_groups(n_groups, &first_id);
218 		if (err)
219 			return err;
220 	}
221 
222 	family->mcgrp_offset = first_id;
223 
224 	/* if still initializing, can't and don't need to realloc bitmaps */
225 	if (!init_net.genl_sock)
226 		return 0;
227 
228 	if (family->netnsok) {
229 		struct net *net;
230 
231 		netlink_table_grab();
232 		rcu_read_lock();
233 		for_each_net_rcu(net) {
234 			err = __netlink_change_ngroups(net->genl_sock,
235 					mc_groups_longs * BITS_PER_LONG);
236 			if (err) {
237 				/*
238 				 * No need to roll back, can only fail if
239 				 * memory allocation fails and then the
240 				 * number of _possible_ groups has been
241 				 * increased on some sockets which is ok.
242 				 */
243 				break;
244 			}
245 		}
246 		rcu_read_unlock();
247 		netlink_table_ungrab();
248 	} else {
249 		err = netlink_change_ngroups(init_net.genl_sock,
250 					     mc_groups_longs * BITS_PER_LONG);
251 	}
252 
253 	if (groups_allocated && err) {
254 		for (i = 0; i < family->n_mcgrps; i++)
255 			clear_bit(family->mcgrp_offset + i, mc_groups);
256 	}
257 
258 	return err;
259 }
260 
261 static void genl_unregister_mc_groups(const struct genl_family *family)
262 {
263 	struct net *net;
264 	int i;
265 
266 	netlink_table_grab();
267 	rcu_read_lock();
268 	for_each_net_rcu(net) {
269 		for (i = 0; i < family->n_mcgrps; i++)
270 			__netlink_clear_multicast_users(
271 				net->genl_sock, family->mcgrp_offset + i);
272 	}
273 	rcu_read_unlock();
274 	netlink_table_ungrab();
275 
276 	for (i = 0; i < family->n_mcgrps; i++) {
277 		int grp_id = family->mcgrp_offset + i;
278 
279 		if (grp_id != 1)
280 			clear_bit(grp_id, mc_groups);
281 		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
282 				&family->mcgrps[i], grp_id);
283 	}
284 }
285 
286 static int genl_validate_ops(const struct genl_family *family)
287 {
288 	const struct genl_ops *ops = family->ops;
289 	unsigned int n_ops = family->n_ops;
290 	int i, j;
291 
292 	if (WARN_ON(n_ops && !ops))
293 		return -EINVAL;
294 
295 	if (!n_ops)
296 		return 0;
297 
298 	for (i = 0; i < n_ops; i++) {
299 		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
300 			return -EINVAL;
301 		for (j = i + 1; j < n_ops; j++)
302 			if (ops[i].cmd == ops[j].cmd)
303 				return -EINVAL;
304 	}
305 
306 	return 0;
307 }
308 
309 /**
310  * genl_register_family - register a generic netlink family
311  * @family: generic netlink family
312  *
313  * Registers the specified family after validating it. Only one
314  * family may be registered with the same family name or identifier.
315  *
316  * The family's ops, multicast groups and module pointer must already
317  * be assigned.
318  *
319  * Returns 0 on success or a negative error code.
320  */
321 int genl_register_family(struct genl_family *family)
322 {
323 	int err, i;
324 	int start = GENL_START_ALLOC, end = GENL_MAX_ID;
325 
326 	err = genl_validate_ops(family);
327 	if (err)
328 		return err;
329 
330 	genl_lock_all();
331 
332 	if (genl_family_find_byname(family->name)) {
333 		err = -EEXIST;
334 		goto errout_locked;
335 	}
336 
337 	/*
338 	 * Sadly, a few cases need to be special-cased
339 	 * due to them having previously abused the API
340 	 * and having used their family ID also as their
341 	 * multicast group ID, so we use reserved IDs
342 	 * for both to be sure we can do that mapping.
343 	 */
344 	if (family == &genl_ctrl) {
345 		/* and this needs to be special for initial family lookups */
346 		start = end = GENL_ID_CTRL;
347 	} else if (strcmp(family->name, "pmcraid") == 0) {
348 		start = end = GENL_ID_PMCRAID;
349 	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
350 		start = end = GENL_ID_VFS_DQUOT;
351 	}
352 
353 	if (family->maxattr && !family->parallel_ops) {
354 		family->attrbuf = kmalloc((family->maxattr+1) *
355 					sizeof(struct nlattr *), GFP_KERNEL);
356 		if (family->attrbuf == NULL) {
357 			err = -ENOMEM;
358 			goto errout_locked;
359 		}
360 	} else
361 		family->attrbuf = NULL;
362 
363 	family->id = idr_alloc(&genl_fam_idr, family,
364 			       start, end + 1, GFP_KERNEL);
365 	if (family->id < 0) {
366 		err = family->id;
367 		goto errout_locked;
368 	}
369 
370 	err = genl_validate_assign_mc_groups(family);
371 	if (err)
372 		goto errout_remove;
373 
374 	genl_unlock_all();
375 
376 	/* send all events */
377 	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
378 	for (i = 0; i < family->n_mcgrps; i++)
379 		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
380 				&family->mcgrps[i], family->mcgrp_offset + i);
381 
382 	return 0;
383 
384 errout_remove:
385 	idr_remove(&genl_fam_idr, family->id);
386 	kfree(family->attrbuf);
387 errout_locked:
388 	genl_unlock_all();
389 	return err;
390 }
391 EXPORT_SYMBOL(genl_register_family);
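
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * genl_register_family(), e.g. from a separate module.  Every "example_*"
 * name, the "example" family name and the command/attribute numbers are
 * hypothetical.
 */
static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct nla_policy example_policy[] = {
	[1] = { .type = NLA_U32 },
};

static const struct genl_ops example_genl_ops[] = {
	{
		.cmd	= 1,
		.doit	= example_doit,
		.policy	= example_policy,
	},
};

static struct genl_family example_genl_family = {
	.name		= "example",
	.version	= 1,
	.maxattr	= 1,
	.module		= THIS_MODULE,
	.ops		= example_genl_ops,
	.n_ops		= ARRAY_SIZE(example_genl_ops),
};

static int __init example_init(void)
{
	/* allocates a free family ID and announces CTRL_CMD_NEWFAMILY */
	return genl_register_family(&example_genl_family);
}
module_init(example_init);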
392 
393 /**
394  * genl_unregister_family - unregister generic netlink family
395  * @family: generic netlink family
396  *
397  * Unregisters the specified family.
398  *
399  * Returns 0 on success or a negative error code.
400  */
401 int genl_unregister_family(const struct genl_family *family)
402 {
403 	genl_lock_all();
404 
405 	if (!genl_family_find_byid(family->id)) {
406 		genl_unlock_all();
407 		return -ENOENT;
408 	}
409 
410 	genl_unregister_mc_groups(family);
411 
412 	idr_remove(&genl_fam_idr, family->id);
413 
414 	up_write(&cb_lock);
415 	wait_event(genl_sk_destructing_waitq,
416 		   atomic_read(&genl_sk_destructing_cnt) == 0);
417 	genl_unlock();
418 
419 	kfree(family->attrbuf);
420 
421 	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
422 
423 	return 0;
424 }
425 EXPORT_SYMBOL(genl_unregister_family);
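
/*
 * Illustrative sketch (not part of the original file): tearing down the
 * hypothetical family from the sketch above, typically from the module's
 * exit path.
 */
static void __exit example_exit(void)
{
	/*
	 * Removes the family from the IDR, clears its multicast groups and
	 * announces CTRL_CMD_DELFAMILY; in-flight non-parallel handlers
	 * finish first because the call takes genl_lock_all().
	 */
	genl_unregister_family(&example_genl_family);
}
module_exit(example_exit);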
426 
427 /**
428  * genlmsg_put - Add generic netlink header to netlink message
429  * @skb: socket buffer holding the message
430  * @portid: netlink portid the message is addressed to
431  * @seq: sequence number (usually the one of the sender)
432  * @family: generic netlink family
433  * @flags: netlink message flags
434  * @cmd: generic netlink command
435  *
436  * Returns a pointer to the user-specific header
437  */
438 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
439 		  const struct genl_family *family, int flags, u8 cmd)
440 {
441 	struct nlmsghdr *nlh;
442 	struct genlmsghdr *hdr;
443 
444 	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
445 			family->hdrsize, flags);
446 	if (nlh == NULL)
447 		return NULL;
448 
449 	hdr = nlmsg_data(nlh);
450 	hdr->cmd = cmd;
451 	hdr->version = family->version;
452 	hdr->reserved = 0;
453 
454 	return (char *) hdr + GENL_HDRLEN;
455 }
456 EXPORT_SYMBOL(genlmsg_put);
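
/*
 * Illustrative sketch (not part of the original file): a typical doit
 * handler building a reply with genlmsg_put().  "example_reply_family" is
 * a hypothetical family assumed to be registered elsewhere; the command
 * and attribute numbers are made up.
 */
static struct genl_family example_reply_family;

static int example_reply_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* echo the sender's portid and seq so the reply matches the request */
	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &example_reply_family, 0, /* cmd */ 2);
	if (!hdr)
		goto nla_put_failure;

	if (nla_put_u32(msg, /* attr */ 1, 42))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);

nla_put_failure:
	nlmsg_free(msg);
	return -EMSGSIZE;
}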
457 
458 static int genl_lock_start(struct netlink_callback *cb)
459 {
460 	/* our ops are always const - netlink API doesn't propagate that */
461 	const struct genl_ops *ops = cb->data;
462 	int rc = 0;
463 
464 	if (ops->start) {
465 		genl_lock();
466 		rc = ops->start(cb);
467 		genl_unlock();
468 	}
469 	return rc;
470 }
471 
472 static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
473 {
474 	/* our ops are always const - netlink API doesn't propagate that */
475 	const struct genl_ops *ops = cb->data;
476 	int rc;
477 
478 	genl_lock();
479 	rc = ops->dumpit(skb, cb);
480 	genl_unlock();
481 	return rc;
482 }
483 
484 static int genl_lock_done(struct netlink_callback *cb)
485 {
486 	/* our ops are always const - netlink API doesn't propagate that */
487 	const struct genl_ops *ops = cb->data;
488 	int rc = 0;
489 
490 	if (ops->done) {
491 		genl_lock();
492 		rc = ops->done(cb);
493 		genl_unlock();
494 	}
495 	return rc;
496 }
497 
498 static int genl_family_rcv_msg(const struct genl_family *family,
499 			       struct sk_buff *skb,
500 			       struct nlmsghdr *nlh,
501 			       struct netlink_ext_ack *extack)
502 {
503 	const struct genl_ops *ops;
504 	struct net *net = sock_net(skb->sk);
505 	struct genl_info info;
506 	struct genlmsghdr *hdr = nlmsg_data(nlh);
507 	struct nlattr **attrbuf;
508 	int hdrlen, err;
509 
510 	/* this family doesn't exist in this netns */
511 	if (!family->netnsok && !net_eq(net, &init_net))
512 		return -ENOENT;
513 
514 	hdrlen = GENL_HDRLEN + family->hdrsize;
515 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
516 		return -EINVAL;
517 
518 	ops = genl_get_cmd(hdr->cmd, family);
519 	if (ops == NULL)
520 		return -EOPNOTSUPP;
521 
522 	if ((ops->flags & GENL_ADMIN_PERM) &&
523 	    !netlink_capable(skb, CAP_NET_ADMIN))
524 		return -EPERM;
525 
526 	if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
527 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
528 		return -EPERM;
529 
530 	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
531 		int rc;
532 
533 		if (ops->dumpit == NULL)
534 			return -EOPNOTSUPP;
535 
536 		if (!family->parallel_ops) {
537 			struct netlink_dump_control c = {
538 				.module = family->module,
539 				/* we have const, but the netlink API doesn't */
540 				.data = (void *)ops,
541 				.start = genl_lock_start,
542 				.dump = genl_lock_dumpit,
543 				.done = genl_lock_done,
544 			};
545 
546 			genl_unlock();
547 			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
548 			genl_lock();
549 
550 		} else {
551 			struct netlink_dump_control c = {
552 				.module = family->module,
553 				.start = ops->start,
554 				.dump = ops->dumpit,
555 				.done = ops->done,
556 			};
557 
558 			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
559 		}
560 
561 		return rc;
562 	}
563 
564 	if (ops->doit == NULL)
565 		return -EOPNOTSUPP;
566 
567 	if (family->maxattr && family->parallel_ops) {
568 		attrbuf = kmalloc((family->maxattr+1) *
569 					sizeof(struct nlattr *), GFP_KERNEL);
570 		if (attrbuf == NULL)
571 			return -ENOMEM;
572 	} else
573 		attrbuf = family->attrbuf;
574 
575 	if (attrbuf) {
576 		err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
577 				  ops->policy, extack);
578 		if (err < 0)
579 			goto out;
580 	}
581 
582 	info.snd_seq = nlh->nlmsg_seq;
583 	info.snd_portid = NETLINK_CB(skb).portid;
584 	info.nlhdr = nlh;
585 	info.genlhdr = nlmsg_data(nlh);
586 	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
587 	info.attrs = attrbuf;
588 	info.extack = extack;
589 	genl_info_net_set(&info, net);
590 	memset(&info.user_ptr, 0, sizeof(info.user_ptr));
591 
592 	if (family->pre_doit) {
593 		err = family->pre_doit(ops, skb, &info);
594 		if (err)
595 			goto out;
596 	}
597 
598 	err = ops->doit(skb, &info);
599 
600 	if (family->post_doit)
601 		family->post_doit(ops, skb, &info);
602 
603 out:
604 	if (family->parallel_ops)
605 		kfree(attrbuf);
606 
607 	return err;
608 }
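
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * dumpit callback as dispatched above for NLM_F_DUMP requests.  The
 * "example_dump_family" name, the command/attribute numbers and the item
 * count are hypothetical.
 */
static struct genl_family example_dump_family;

static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* cb->args[] persists between the successive dump passes */
	unsigned long idx = cb->args[0];
	void *hdr;

	for (; idx < 16; idx++) {
		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, &example_dump_family,
				  NLM_F_MULTI, /* cmd */ 3);
		if (!hdr)
			break;	/* skb full, resume from idx next time */
		if (nla_put_u32(skb, /* attr */ 1, idx)) {
			genlmsg_cancel(skb, hdr);
			break;
		}
		genlmsg_end(skb, hdr);
	}

	cb->args[0] = idx;
	return skb->len;
}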
609 
610 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
611 			struct netlink_ext_ack *extack)
612 {
613 	const struct genl_family *family;
614 	int err;
615 
616 	family = genl_family_find_byid(nlh->nlmsg_type);
617 	if (family == NULL)
618 		return -ENOENT;
619 
620 	if (!family->parallel_ops)
621 		genl_lock();
622 
623 	err = genl_family_rcv_msg(family, skb, nlh, extack);
624 
625 	if (!family->parallel_ops)
626 		genl_unlock();
627 
628 	return err;
629 }
630 
631 static void genl_rcv(struct sk_buff *skb)
632 {
633 	down_read(&cb_lock);
634 	netlink_rcv_skb(skb, &genl_rcv_msg);
635 	up_read(&cb_lock);
636 }
637 
638 /**************************************************************************
639  * Controller
640  **************************************************************************/
641 
642 static struct genl_family genl_ctrl;
643 
644 static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
645 			  u32 flags, struct sk_buff *skb, u8 cmd)
646 {
647 	void *hdr;
648 
649 	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
650 	if (hdr == NULL)
651 		return -1;
652 
653 	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
654 	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
655 	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
656 	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
657 	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
658 		goto nla_put_failure;
659 
660 	if (family->n_ops) {
661 		struct nlattr *nla_ops;
662 		int i;
663 
664 		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
665 		if (nla_ops == NULL)
666 			goto nla_put_failure;
667 
668 		for (i = 0; i < family->n_ops; i++) {
669 			struct nlattr *nest;
670 			const struct genl_ops *ops = &family->ops[i];
671 			u32 op_flags = ops->flags;
672 
673 			if (ops->dumpit)
674 				op_flags |= GENL_CMD_CAP_DUMP;
675 			if (ops->doit)
676 				op_flags |= GENL_CMD_CAP_DO;
677 			if (ops->policy)
678 				op_flags |= GENL_CMD_CAP_HASPOL;
679 
680 			nest = nla_nest_start(skb, i + 1);
681 			if (nest == NULL)
682 				goto nla_put_failure;
683 
684 			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
685 			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
686 				goto nla_put_failure;
687 
688 			nla_nest_end(skb, nest);
689 		}
690 
691 		nla_nest_end(skb, nla_ops);
692 	}
693 
694 	if (family->n_mcgrps) {
695 		struct nlattr *nla_grps;
696 		int i;
697 
698 		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
699 		if (nla_grps == NULL)
700 			goto nla_put_failure;
701 
702 		for (i = 0; i < family->n_mcgrps; i++) {
703 			struct nlattr *nest;
704 			const struct genl_multicast_group *grp;
705 
706 			grp = &family->mcgrps[i];
707 
708 			nest = nla_nest_start(skb, i + 1);
709 			if (nest == NULL)
710 				goto nla_put_failure;
711 
712 			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
713 					family->mcgrp_offset + i) ||
714 			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
715 					   grp->name))
716 				goto nla_put_failure;
717 
718 			nla_nest_end(skb, nest);
719 		}
720 		nla_nest_end(skb, nla_grps);
721 	}
722 
723 	genlmsg_end(skb, hdr);
724 	return 0;
725 
726 nla_put_failure:
727 	genlmsg_cancel(skb, hdr);
728 	return -EMSGSIZE;
729 }
730 
731 static int ctrl_fill_mcgrp_info(const struct genl_family *family,
732 				const struct genl_multicast_group *grp,
733 				int grp_id, u32 portid, u32 seq, u32 flags,
734 				struct sk_buff *skb, u8 cmd)
735 {
736 	void *hdr;
737 	struct nlattr *nla_grps;
738 	struct nlattr *nest;
739 
740 	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
741 	if (hdr == NULL)
742 		return -1;
743 
744 	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
745 	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
746 		goto nla_put_failure;
747 
748 	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
749 	if (nla_grps == NULL)
750 		goto nla_put_failure;
751 
752 	nest = nla_nest_start(skb, 1);
753 	if (nest == NULL)
754 		goto nla_put_failure;
755 
756 	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
757 	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
758 			   grp->name))
759 		goto nla_put_failure;
760 
761 	nla_nest_end(skb, nest);
762 	nla_nest_end(skb, nla_grps);
763 
764 	genlmsg_end(skb, hdr);
765 	return 0;
766 
767 nla_put_failure:
768 	genlmsg_cancel(skb, hdr);
769 	return -EMSGSIZE;
770 }
771 
772 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
773 {
774 	int n = 0;
775 	struct genl_family *rt;
776 	struct net *net = sock_net(skb->sk);
777 	int fams_to_skip = cb->args[0];
778 	unsigned int id;
779 
780 	idr_for_each_entry(&genl_fam_idr, rt, id) {
781 		if (!rt->netnsok && !net_eq(net, &init_net))
782 			continue;
783 
784 		if (n++ < fams_to_skip)
785 			continue;
786 
787 		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
788 				   cb->nlh->nlmsg_seq, NLM_F_MULTI,
789 				   skb, CTRL_CMD_NEWFAMILY) < 0) {
790 			n--;
791 			break;
792 		}
793 	}
794 
795 	cb->args[0] = n;
796 	return skb->len;
797 }
798 
799 static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
800 					     u32 portid, int seq, u8 cmd)
801 {
802 	struct sk_buff *skb;
803 	int err;
804 
805 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
806 	if (skb == NULL)
807 		return ERR_PTR(-ENOBUFS);
808 
809 	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
810 	if (err < 0) {
811 		nlmsg_free(skb);
812 		return ERR_PTR(err);
813 	}
814 
815 	return skb;
816 }
817 
818 static struct sk_buff *
819 ctrl_build_mcgrp_msg(const struct genl_family *family,
820 		     const struct genl_multicast_group *grp,
821 		     int grp_id, u32 portid, int seq, u8 cmd)
822 {
823 	struct sk_buff *skb;
824 	int err;
825 
826 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
827 	if (skb == NULL)
828 		return ERR_PTR(-ENOBUFS);
829 
830 	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
831 				   seq, 0, skb, cmd);
832 	if (err < 0) {
833 		nlmsg_free(skb);
834 		return ERR_PTR(err);
835 	}
836 
837 	return skb;
838 }
839 
840 static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
841 	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
842 	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
843 				    .len = GENL_NAMSIZ - 1 },
844 };
845 
846 static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
847 {
848 	struct sk_buff *msg;
849 	const struct genl_family *res = NULL;
850 	int err = -EINVAL;
851 
852 	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
853 		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
854 		res = genl_family_find_byid(id);
855 		err = -ENOENT;
856 	}
857 
858 	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
859 		char *name;
860 
861 		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
862 		res = genl_family_find_byname(name);
863 #ifdef CONFIG_MODULES
864 		if (res == NULL) {
865 			genl_unlock();
866 			up_read(&cb_lock);
867 			request_module("net-pf-%d-proto-%d-family-%s",
868 				       PF_NETLINK, NETLINK_GENERIC, name);
869 			down_read(&cb_lock);
870 			genl_lock();
871 			res = genl_family_find_byname(name);
872 		}
873 #endif
874 		err = -ENOENT;
875 	}
876 
877 	if (res == NULL)
878 		return err;
879 
880 	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
881 		/* family doesn't exist here */
882 		return -ENOENT;
883 	}
884 
885 	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
886 				    CTRL_CMD_NEWFAMILY);
887 	if (IS_ERR(msg))
888 		return PTR_ERR(msg);
889 
890 	return genlmsg_reply(msg, info);
891 }
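
/*
 * Userspace counterpart (illustrative sketch, assuming the libnl-3 genl
 * helpers; not part of this file): resolving a family name to its
 * dynamically assigned ID goes through CTRL_CMD_GETFAMILY above, e.g.:
 *
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	int family_id;
 *
 *	genl_connect(sk);
 *	family_id = genl_ctrl_resolve(sk, "nlctrl");
 *	nl_socket_free(sk);
 */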
892 
893 static int genl_ctrl_event(int event, const struct genl_family *family,
894 			   const struct genl_multicast_group *grp,
895 			   int grp_id)
896 {
897 	struct sk_buff *msg;
898 
899 	/* genl is still initialising */
900 	if (!init_net.genl_sock)
901 		return 0;
902 
903 	switch (event) {
904 	case CTRL_CMD_NEWFAMILY:
905 	case CTRL_CMD_DELFAMILY:
906 		WARN_ON(grp);
907 		msg = ctrl_build_family_msg(family, 0, 0, event);
908 		break;
909 	case CTRL_CMD_NEWMCAST_GRP:
910 	case CTRL_CMD_DELMCAST_GRP:
911 		BUG_ON(!grp);
912 		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
913 		break;
914 	default:
915 		return -EINVAL;
916 	}
917 
918 	if (IS_ERR(msg))
919 		return PTR_ERR(msg);
920 
921 	if (!family->netnsok) {
922 		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
923 					0, GFP_KERNEL);
924 	} else {
925 		rcu_read_lock();
926 		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
927 					0, GFP_ATOMIC);
928 		rcu_read_unlock();
929 	}
930 
931 	return 0;
932 }
933 
934 static const struct genl_ops genl_ctrl_ops[] = {
935 	{
936 		.cmd		= CTRL_CMD_GETFAMILY,
937 		.doit		= ctrl_getfamily,
938 		.dumpit		= ctrl_dumpfamily,
939 		.policy		= ctrl_policy,
940 	},
941 };
942 
943 static const struct genl_multicast_group genl_ctrl_groups[] = {
944 	{ .name = "notify", },
945 };
946 
947 static struct genl_family genl_ctrl __ro_after_init = {
948 	.module = THIS_MODULE,
949 	.ops = genl_ctrl_ops,
950 	.n_ops = ARRAY_SIZE(genl_ctrl_ops),
951 	.mcgrps = genl_ctrl_groups,
952 	.n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
953 	.id = GENL_ID_CTRL,
954 	.name = "nlctrl",
955 	.version = 0x2,
956 	.maxattr = CTRL_ATTR_MAX,
957 	.netnsok = true,
958 };
959 
960 static int genl_bind(struct net *net, int group)
961 {
962 	struct genl_family *f;
963 	int err = -ENOENT;
964 	unsigned int id;
965 
966 	down_read(&cb_lock);
967 
968 	idr_for_each_entry(&genl_fam_idr, f, id) {
969 		if (group >= f->mcgrp_offset &&
970 		    group < f->mcgrp_offset + f->n_mcgrps) {
971 			int fam_grp = group - f->mcgrp_offset;
972 
973 			if (!f->netnsok && net != &init_net)
974 				err = -ENOENT;
975 			else if (f->mcast_bind)
976 				err = f->mcast_bind(net, fam_grp);
977 			else
978 				err = 0;
979 			break;
980 		}
981 	}
982 	up_read(&cb_lock);
983 
984 	return err;
985 }
986 
987 static void genl_unbind(struct net *net, int group)
988 {
989 	struct genl_family *f;
990 	unsigned int id;
991 
992 	down_read(&cb_lock);
993 
994 	idr_for_each_entry(&genl_fam_idr, f, id) {
995 		if (group >= f->mcgrp_offset &&
996 		    group < f->mcgrp_offset + f->n_mcgrps) {
997 			int fam_grp = group - f->mcgrp_offset;
998 
999 			if (f->mcast_unbind)
1000 				f->mcast_unbind(net, fam_grp);
1001 			break;
1002 		}
1003 	}
1004 	up_read(&cb_lock);
1005 }
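
/*
 * Illustrative sketch (not part of the original file): callbacks a family
 * could plug into .mcast_bind/.mcast_unbind to learn about the per-socket
 * group subscriptions handled above.  "example_mc_refcount" and the
 * function names are hypothetical.
 */
static atomic_t example_mc_refcount = ATOMIC_INIT(0);

static int example_mcast_bind(struct net *net, int group)
{
	/* "group" is family-relative, i.e. 0 for the family's first mcgrp */
	atomic_inc(&example_mc_refcount);
	return 0;
}

static void example_mcast_unbind(struct net *net, int group)
{
	atomic_dec(&example_mc_refcount);
}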
1006 
1007 static int __net_init genl_pernet_init(struct net *net)
1008 {
1009 	struct netlink_kernel_cfg cfg = {
1010 		.input		= genl_rcv,
1011 		.flags		= NL_CFG_F_NONROOT_RECV,
1012 		.bind		= genl_bind,
1013 		.unbind		= genl_unbind,
1014 	};
1015 
1016 	/* we'll bump the group number right afterwards */
1017 	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1018 
1019 	if (!net->genl_sock && net_eq(net, &init_net))
1020 		panic("GENL: Cannot initialize generic netlink\n");
1021 
1022 	if (!net->genl_sock)
1023 		return -ENOMEM;
1024 
1025 	return 0;
1026 }
1027 
1028 static void __net_exit genl_pernet_exit(struct net *net)
1029 {
1030 	netlink_kernel_release(net->genl_sock);
1031 	net->genl_sock = NULL;
1032 }
1033 
1034 static struct pernet_operations genl_pernet_ops = {
1035 	.init = genl_pernet_init,
1036 	.exit = genl_pernet_exit,
1037 };
1038 
1039 static int __init genl_init(void)
1040 {
1041 	int err;
1042 
1043 	err = genl_register_family(&genl_ctrl);
1044 	if (err < 0)
1045 		goto problem;
1046 
1047 	err = register_pernet_subsys(&genl_pernet_ops);
1048 	if (err)
1049 		goto problem;
1050 
1051 	return 0;
1052 
1053 problem:
1054 	panic("GENL: Cannot register controller: %d\n", err);
1055 }
1056 
1057 subsys_initcall(genl_init);
1058 
1059 /**
1060  * genl_family_attrbuf - return family's attrbuf
1061  * @family: the family
1062  *
1063  * Returns the family's attrbuf, after checking that it is
1064  * actually valid to access it.
1065  *
1066  * You cannot use this function with a family that has parallel_ops
1067  * and you can only use it within (pre/post) doit/dumpit callbacks.
1068  */
1069 struct nlattr **genl_family_attrbuf(const struct genl_family *family)
1070 {
1071 	if (!WARN_ON(family->parallel_ops))
1072 		lockdep_assert_held(&genl_mutex);
1073 
1074 	return family->attrbuf;
1075 }
1076 EXPORT_SYMBOL(genl_family_attrbuf);
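
/*
 * Illustrative sketch (not part of the original file): parsing request
 * attributes in a dump ->start() callback of a hypothetical non-parallel
 * "example_attr_family", assumed to be registered with maxattr == 1 and a
 * matching policy; genl_mutex is held here for non-parallel families.
 */
static struct genl_family example_attr_family;
static const struct nla_policy example_attr_policy[] = {
	[1] = { .type = NLA_U32 },
};

static int example_dump_start(struct netlink_callback *cb)
{
	/* reuses the family's pre-allocated attrbuf */
	struct nlattr **attrs = genl_family_attrbuf(&example_attr_family);
	int err;

	err = nlmsg_parse(cb->nlh, GENL_HDRLEN + example_attr_family.hdrsize,
			  attrs, 1, example_attr_policy, NULL);
	if (err < 0)
		return err;

	if (attrs[1])
		cb->args[0] = nla_get_u32(attrs[1]);
	return 0;
}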
1077 
1078 static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1079 			 gfp_t flags)
1080 {
1081 	struct sk_buff *tmp;
1082 	struct net *net, *prev = NULL;
1083 	int err;
1084 
1085 	for_each_net_rcu(net) {
1086 		if (prev) {
1087 			tmp = skb_clone(skb, flags);
1088 			if (!tmp) {
1089 				err = -ENOMEM;
1090 				goto error;
1091 			}
1092 			err = nlmsg_multicast(prev->genl_sock, tmp,
1093 					      portid, group, flags);
1094 			if (err)
1095 				goto error;
1096 		}
1097 
1098 		prev = net;
1099 	}
1100 
1101 	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1102  error:
1103 	kfree_skb(skb);
1104 	return err;
1105 }
1106 
1107 int genlmsg_multicast_allns(const struct genl_family *family,
1108 			    struct sk_buff *skb, u32 portid,
1109 			    unsigned int group, gfp_t flags)
1110 {
1111 	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1112 		return -EINVAL;
1113 	group = family->mcgrp_offset + group;
1114 	return genlmsg_mcast(skb, portid, group, flags);
1115 }
1116 EXPORT_SYMBOL(genlmsg_multicast_allns);
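
/*
 * Illustrative sketch (not part of the original file): broadcasting an
 * event to the first multicast group of a hypothetical
 * "example_event_family", assumed to be registered with at least one
 * group.  The group index is family-relative; the offset into the global
 * group space is added above.
 */
static struct genl_family example_event_family;

static int example_send_event(u32 value)
{
	struct sk_buff *msg;
	void *hdr;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, 0, 0, &example_event_family, 0, /* cmd */ 4);
	if (!hdr || nla_put_u32(msg, /* attr */ 1, value)) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}
	genlmsg_end(msg, hdr);

	return genlmsg_multicast_allns(&example_event_family, msg, 0,
				       0 /* first group */, GFP_KERNEL);
}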
1117 
1118 void genl_notify(const struct genl_family *family, struct sk_buff *skb,
1119 		 struct genl_info *info, u32 group, gfp_t flags)
1120 {
1121 	struct net *net = genl_info_net(info);
1122 	struct sock *sk = net->genl_sock;
1123 	int report = 0;
1124 
1125 	if (info->nlhdr)
1126 		report = nlmsg_report(info->nlhdr);
1127 
1128 	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1129 		return;
1130 	group = family->mcgrp_offset + group;
1131 	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
1132 }
1133 EXPORT_SYMBOL(genl_notify);
1134