/*
 * NETLINK	Generic Netlink Family
 *
 *		Authors:	Jamal Hadi Salim
 *				Thomas Graf <tgraf@suug.ch>
 *				Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);

void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

#ifdef CONFIG_LOCKDEP
bool lockdep_genl_is_held(void)
{
	return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif

static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}

static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}

static DEFINE_IDR(genl_fam_idr);

/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID, we
 * cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 * did the same thing as the VFS quota code (maybe copied?)
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;

static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);

static const struct genl_family *genl_family_find_byid(unsigned int id)
{
	return idr_find(&genl_fam_idr, id);
}

static const struct genl_family *genl_family_find_byname(char *name)
{
	const struct genl_family *family;
	unsigned int id;

	idr_for_each_entry(&genl_fam_idr, family, id)
		if (strcmp(family->name, name) == 0)
			return family;

	return NULL;
}

static const struct genl_ops *genl_get_cmd(u8 cmd,
					   const struct genl_family *family)
{
	int i;

	for (i = 0; i < family->n_ops; i++)
		if (family->ops[i].cmd == cmd)
			return &family->ops[i];

	return NULL;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}

static struct genl_family genl_ctrl;
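
/*
 * Illustrative sketch (not part of the upstream file): how the bitmap
 * allocator above is used. A family registering two groups gets two
 * consecutive free bits; the index of the first bit becomes the family's
 * mcgrp_offset (assigned by genl_validate_assign_mc_groups() below), and
 * callers later address groups by their index into family->mcgrps[],
 * never by the global bit number. The "foo" names are hypothetical.
 *
 *	static const struct genl_multicast_group foo_mcgrps[] = {
 *		{ .name = "events" },	// index 0
 *		{ .name = "config" },	// index 1
 *	};
 *
 *	// genl_validate_assign_mc_groups() ends up doing roughly:
 *	//	genl_allocate_reserve_groups(2, &first_id);
 *	//	family->mcgrp_offset = first_id;
 *	// so group "config" is delivered on netlink group
 *	// family->mcgrp_offset + 1.
 */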

static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;

	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}

static void genl_unregister_mc_groups(const struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}

static int genl_validate_ops(const struct genl_family *family)
{
	const struct genl_ops *ops = family->ops;
	unsigned int n_ops = family->n_ops;
	int i, j;

	if (WARN_ON(n_ops && !ops))
		return -EINVAL;

	if (!n_ops)
		return 0;

	for (i = 0; i < n_ops; i++) {
		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
			return -EINVAL;
		for (j = i + 1; j < n_ops; j++)
			if (ops[i].cmd == ops[j].cmd)
				return -EINVAL;
	}

	return 0;
}

/**
 * genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 *
 * The family's ops, multicast groups and module pointer must already
 * be assigned.
 *
 * Return 0 on success or a negative error code.
 */
int genl_register_family(struct genl_family *family)
{
	int err, i;
	int start = GENL_START_ALLOC, end = GENL_MAX_ID;

	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	/*
	 * Sadly, a few cases need to be special-cased
	 * due to them having previously abused the API
	 * and having used their family ID also as their
	 * multicast group ID, so we use reserved IDs
	 * for both to be sure we can do that mapping.
	 */
	if (family == &genl_ctrl) {
		/* and this needs to be special for initial family lookups */
		start = end = GENL_ID_CTRL;
	} else if (strcmp(family->name, "pmcraid") == 0) {
		start = end = GENL_ID_PMCRAID;
	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
		start = end = GENL_ID_VFS_DQUOT;
	}

	if (family->maxattr && !family->parallel_ops) {
		family->attrbuf = kmalloc((family->maxattr+1) *
					  sizeof(struct nlattr *), GFP_KERNEL);
		if (family->attrbuf == NULL) {
			err = -ENOMEM;
			goto errout_locked;
		}
	} else
		family->attrbuf = NULL;

	family->id = idr_alloc(&genl_fam_idr, family,
			       start, end + 1, GFP_KERNEL);
	if (family->id < 0) {
		err = family->id;
		goto errout_locked;
	}

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_remove;

	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

errout_remove:
	idr_remove(&genl_fam_idr, family->id);
	kfree(family->attrbuf);
errout_locked:
	genl_unlock_all();
	return err;
}
EXPORT_SYMBOL(genl_register_family);
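
/*
 * Usage sketch (not part of the upstream file): a minimal family as a
 * module might define and register it, mirroring the genl_ctrl definition
 * further down in this file. All "FOO"/"foo" identifiers, commands and
 * attributes are hypothetical and only illustrate the fields
 * genl_register_family() expects to be filled in.
 *
 *	static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
 *		[FOO_ATTR_VALUE] = { .type = NLA_U32 },
 *	};
 *
 *	static const struct genl_ops foo_ops[] = {
 *		{
 *			.cmd	= FOO_CMD_GET,
 *			.doit	= foo_get_doit,
 *			.policy	= foo_policy,
 *		},
 *	};
 *
 *	static struct genl_family foo_family = {
 *		.name		= "foo",
 *		.version	= 1,
 *		.maxattr	= FOO_ATTR_MAX,
 *		.module		= THIS_MODULE,
 *		.ops		= foo_ops,
 *		.n_ops		= ARRAY_SIZE(foo_ops),
 *	};
 *
 *	// typically from the module's init function:
 *	//	err = genl_register_family(&foo_family);
 *	// and genl_unregister_family(&foo_family) on module exit.
 */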

/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(const struct genl_family *family)
{
	genl_lock_all();

	if (!genl_family_find_byid(family->id)) {
		genl_unlock_all();
		return -ENOENT;
	}

	genl_unregister_mc_groups(family);

	idr_remove(&genl_fam_idr, family->id);

	up_write(&cb_lock);
	wait_event(genl_sk_destructing_waitq,
		   atomic_read(&genl_sk_destructing_cnt) == 0);
	genl_unlock();

	kfree(family->attrbuf);

	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);

	return 0;
}
EXPORT_SYMBOL(genl_unregister_family);

/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  const struct genl_family *family, int flags, u8 cmd)
{
	struct nlmsghdr *nlh;
	struct genlmsghdr *hdr;

	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
			family->hdrsize, flags);
	if (nlh == NULL)
		return NULL;

	hdr = nlmsg_data(nlh);
	hdr->cmd = cmd;
	hdr->version = family->version;
	hdr->reserved = 0;

	return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
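
/*
 * Usage sketch (not part of the upstream file): composing a reply with
 * genlmsg_put(). FOO_CMD_GET, FOO_ATTR_VALUE and foo_family are
 * hypothetical; the flow being illustrated is: reserve the netlink and
 * genetlink headers, add attributes, then close with genlmsg_end() or
 * back out with genlmsg_cancel().
 *
 *	struct sk_buff *msg;
 *	void *hdr;
 *
 *	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *			  &foo_family, 0, FOO_CMD_GET);
 *	if (!hdr)
 *		goto free;
 *
 *	if (nla_put_u32(msg, FOO_ATTR_VALUE, value))
 *		goto cancel;
 *
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 *
 * cancel:
 *	genlmsg_cancel(msg, hdr);
 * free:
 *	nlmsg_free(msg);
 *	return -EMSGSIZE;
 */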

static int genl_lock_start(struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc = 0;

	if (ops->start) {
		genl_lock();
		rc = ops->start(cb);
		genl_unlock();
	}
	return rc;
}

static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc;

	genl_lock();
	rc = ops->dumpit(skb, cb);
	genl_unlock();
	return rc;
}

static int genl_lock_done(struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc = 0;

	if (ops->done) {
		genl_lock();
		rc = ops->done(cb);
		genl_unlock();
	}
	return rc;
}

static int genl_family_rcv_msg(const struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh)
{
	const struct genl_ops *ops;
	struct net *net = sock_net(skb->sk);
	struct genl_info info;
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct nlattr **attrbuf;
	int hdrlen, err;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	ops = genl_get_cmd(hdr->cmd, family);
	if (ops == NULL)
		return -EOPNOTSUPP;

	if ((ops->flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
		int rc;

		if (ops->dumpit == NULL)
			return -EOPNOTSUPP;

		if (!family->parallel_ops) {
			struct netlink_dump_control c = {
				.module = family->module,
				/* we have const, but the netlink API doesn't */
				.data = (void *)ops,
				.start = genl_lock_start,
				.dump = genl_lock_dumpit,
				.done = genl_lock_done,
			};

			genl_unlock();
			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
			genl_lock();

		} else {
			struct netlink_dump_control c = {
				.module = family->module,
				.start = ops->start,
				.dump = ops->dumpit,
				.done = ops->done,
			};

			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
		}

		return rc;
	}

	if (ops->doit == NULL)
		return -EOPNOTSUPP;

	if (family->maxattr && family->parallel_ops) {
		attrbuf = kmalloc((family->maxattr+1) *
				  sizeof(struct nlattr *), GFP_KERNEL);
		if (attrbuf == NULL)
			return -ENOMEM;
	} else
		attrbuf = family->attrbuf;

	if (attrbuf) {
		err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
				  ops->policy);
		if (err < 0)
			goto out;
	}

	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
	info.attrs = attrbuf;
	genl_info_net_set(&info, net);
	memset(&info.user_ptr, 0, sizeof(info.user_ptr));

	if (family->pre_doit) {
		err = family->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	if (family->post_doit)
		family->post_doit(ops, skb, &info);

out:
	if (family->parallel_ops)
		kfree(attrbuf);

	return err;
}

static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	const struct genl_family *family;
	int err;

	family = genl_family_find_byid(nlh->nlmsg_type);
	if (family == NULL)
		return -ENOENT;

	if (!family->parallel_ops)
		genl_lock();

	err = genl_family_rcv_msg(family, skb, nlh);

	if (!family->parallel_ops)
		genl_unlock();

	return err;
}

static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}
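
/*
 * Usage sketch (not part of the upstream file): what a doit() handler
 * typically sees after genl_family_rcv_msg() has run. The attributes have
 * already been parsed against ops->policy into info->attrs, and
 * info->snd_portid/info->snd_seq identify the requester for a reply built
 * with genlmsg_put()/genlmsg_reply(). The FOO_* names and foo_get_doit()
 * are hypothetical.
 *
 *	static int foo_get_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		u32 val;
 *
 *		if (!info->attrs[FOO_ATTR_VALUE])
 *			return -EINVAL;
 *
 *		val = nla_get_u32(info->attrs[FOO_ATTR_VALUE]);
 *		// ... act on val, optionally send a reply ...
 *		return 0;
 *	}
 */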

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl;

static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;

	if (family->n_ops) {
		struct nlattr *nla_ops;
		int i;

		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_ops; i++) {
			struct nlattr *nest;
			const struct genl_ops *ops = &family->ops[i];
			u32 op_flags = ops->flags;

			if (ops->dumpit)
				op_flags |= GENL_CMD_CAP_DUMP;
			if (ops->doit)
				op_flags |= GENL_CMD_CAP_DO;
			if (ops->policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}

	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;

		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_fill_mcgrp_info(const struct genl_family *family,
				const struct genl_multicast_group *grp,
				int grp_id, u32 portid, u32 seq, u32 flags,
				struct sk_buff *skb, u8 cmd)
{
	void *hdr;
	struct nlattr *nla_grps;
	struct nlattr *nest;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
		goto nla_put_failure;

	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
	if (nla_grps == NULL)
		goto nla_put_failure;

	nest = nla_nest_start(skb, 1);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
			   grp->name))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	nla_nest_end(skb, nla_grps);

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
	int n = 0;
	struct genl_family *rt;
	struct net *net = sock_net(skb->sk);
	int fams_to_skip = cb->args[0];
	unsigned int id;

	idr_for_each_entry(&genl_fam_idr, rt, id) {
		if (!rt->netnsok && !net_eq(net, &init_net))
			continue;

		if (n++ < fams_to_skip)
			continue;

		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   skb, CTRL_CMD_NEWFAMILY) < 0) {
			n--;
			break;
		}
	}

	cb->args[0] = n;
	return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
					     u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static struct sk_buff *
ctrl_build_mcgrp_msg(const struct genl_family *family,
		     const struct genl_multicast_group *grp,
		     int grp_id, u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
				   seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
};

static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	const struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}

static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (!family->netnsok) {
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	} else {
		rcu_read_lock();
		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
					0, GFP_ATOMIC);
		rcu_read_unlock();
	}

	return 0;
}
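
/*
 * Note/sketch (not part of the upstream file): the request_module() call
 * in ctrl_getfamily() above is what lets a CTRL_CMD_GETFAMILY lookup by
 * name load the module providing that family on demand. For that to work
 * the module has to advertise a matching alias; with a hypothetical "foo"
 * family this is usually done via
 *
 *	MODULE_ALIAS_GENL_FAMILY("foo");
 *
 * which expands to a "net-pf-16-proto-16-family-foo" alias matching the
 * format string used above (PF_NETLINK == NETLINK_GENERIC == 16).
 */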

static const struct genl_ops genl_ctrl_ops[] = {
	{
		.cmd		= CTRL_CMD_GETFAMILY,
		.doit		= ctrl_getfamily,
		.dumpit		= ctrl_dumpfamily,
		.policy		= ctrl_policy,
	},
};

static const struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};

static struct genl_family genl_ctrl __ro_after_init = {
	.module = THIS_MODULE,
	.ops = genl_ctrl_ops,
	.n_ops = ARRAY_SIZE(genl_ctrl_ops),
	.mcgrps = genl_ctrl_groups,
	.n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
	.id = GENL_ID_CTRL,
	.name = "nlctrl",
	.version = 0x2,
	.maxattr = CTRL_ATTR_MAX,
	.netnsok = true,
};

static int genl_bind(struct net *net, int group)
{
	struct genl_family *f;
	int err = -ENOENT;
	unsigned int id;

	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, f, id) {
		if (group >= f->mcgrp_offset &&
		    group < f->mcgrp_offset + f->n_mcgrps) {
			int fam_grp = group - f->mcgrp_offset;

			if (!f->netnsok && net != &init_net)
				err = -ENOENT;
			else if (f->mcast_bind)
				err = f->mcast_bind(net, fam_grp);
			else
				err = 0;
			break;
		}
	}
	up_read(&cb_lock);

	return err;
}

static void genl_unbind(struct net *net, int group)
{
	struct genl_family *f;
	unsigned int id;

	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, f, id) {
		if (group >= f->mcgrp_offset &&
		    group < f->mcgrp_offset + f->n_mcgrps) {
			int fam_grp = group - f->mcgrp_offset;

			if (f->mcast_unbind)
				f->mcast_unbind(net, fam_grp);
			break;
		}
	}
	up_read(&cb_lock);
}

static int __net_init genl_pernet_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input		= genl_rcv,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= genl_bind,
		.unbind		= genl_unbind,
	};

	/* we'll bump the group number right afterwards */
	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

	if (!net->genl_sock && net_eq(net, &init_net))
		panic("GENL: Cannot initialize generic netlink\n");

	if (!net->genl_sock)
		return -ENOMEM;

	return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
	netlink_kernel_release(net->genl_sock);
	net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
	int err;

	err = genl_register_family(&genl_ctrl);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

subsys_initcall(genl_init);

/**
 * genl_family_attrbuf - return family's attrbuf
 * @family: the family
 *
 * Return the family's attrbuf, while validating that it's
 * actually valid to access it.
 *
 * You cannot use this function with a family that has parallel_ops
 * and you can only use it within (pre/post) doit/dumpit callbacks.
 */
struct nlattr **genl_family_attrbuf(const struct genl_family *family)
{
	if (!WARN_ON(family->parallel_ops))
		lockdep_assert_held(&genl_mutex);

	return family->attrbuf;
}
EXPORT_SYMBOL(genl_family_attrbuf);

static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
			 gfp_t flags)
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
	int err;

	for_each_net_rcu(net) {
		if (prev) {
			tmp = skb_clone(skb, flags);
			if (!tmp) {
				err = -ENOMEM;
				goto error;
			}
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, flags);
			if (err)
				goto error;
		}

		prev = net;
	}

	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
error:
	kfree_skb(skb);
	return err;
}

int genlmsg_multicast_allns(const struct genl_family *family,
			    struct sk_buff *skb, u32 portid,
			    unsigned int group, gfp_t flags)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return -EINVAL;
	group = family->mcgrp_offset + group;
	return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

void genl_notify(const struct genl_family *family, struct sk_buff *skb,
		 struct genl_info *info, u32 group, gfp_t flags)
{
	struct net *net = genl_info_net(info);
	struct sock *sk = net->genl_sock;
	int report = 0;

	if (info->nlhdr)
		report = nlmsg_report(info->nlhdr);

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return;
	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
}
EXPORT_SYMBOL(genl_notify);
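
/*
 * Usage sketch (not part of the upstream file): sending a notification to
 * one of a family's multicast groups. Note that the group argument to
 * genlmsg_multicast_allns() and genl_notify() is the index into
 * family->mcgrps[] (0, 1, ...), not the global netlink group number; the
 * mcgrp_offset translation happens in the functions above. The "foo"
 * family and the FOO_MCGRP_EVENTS index are hypothetical.
 *
 *	// msg built with genlmsg_put()/nla_put_*()/genlmsg_end()
 *	genlmsg_multicast_allns(&foo_family, msg, 0,
 *				FOO_MCGRP_EVENTS, GFP_KERNEL);
 *
 *	// or, from within a doit() handler:
 *	genl_notify(&foo_family, msg, info, FOO_MCGRP_EVENTS, GFP_KERNEL);
 */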