/*
 * NETLINK	Generic Netlink Family
 *
 *		Authors:	Jamal Hadi Salim
 *				Thomas Graf <tgraf@suug.ch>
 *				Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <net/sock.h>
#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
	return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif

static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}

static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}

#define GENL_FAM_TAB_SIZE	16
#define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)

static struct list_head family_ht[GENL_FAM_TAB_SIZE];
/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID, we
 * cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 * did the same thing as the VFS quota code (maybe copied?)
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;

static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);

static inline unsigned int genl_family_hash(unsigned int id)
{
	return id & GENL_FAM_TAB_MASK;
}

static inline struct list_head *genl_family_chain(unsigned int id)
{
	return &family_ht[genl_family_hash(id)];
}

static struct genl_family *genl_family_find_byid(unsigned int id)
{
	struct genl_family *f;

	list_for_each_entry(f, genl_family_chain(id), family_list)
		if (f->id == id)
			return f;

	return NULL;
}

static struct genl_family *genl_family_find_byname(char *name)
{
	struct genl_family *f;
	int i;

	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
		list_for_each_entry(f, genl_family_chain(i), family_list)
			if (strcmp(f->name, name) == 0)
				return f;

	return NULL;
}

static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
{
	int i;

	for (i = 0; i < family->n_ops; i++)
		if (family->ops[i].cmd == cmd)
			return &family->ops[i];

	return NULL;
}

/* Of course we are going to have problems once we hit
 * 2^16 alive types, but that can only happen by year 2K
 */
static u16 genl_generate_id(void)
{
	static u16 id_gen_idx = GENL_MIN_ID;
	int i;

	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
		    id_gen_idx != GENL_ID_PMCRAID &&
		    !genl_family_find_byid(id_gen_idx))
			return id_gen_idx;
		if (++id_gen_idx > GENL_MAX_ID)
			id_gen_idx = GENL_MIN_ID;
	}

	return 0;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id >= mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}

static struct genl_family genl_ctrl;

static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;

	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}

static void genl_unregister_mc_groups(struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}

static int genl_validate_ops(const struct genl_family *family)
{
	const struct genl_ops *ops = family->ops;
	unsigned int n_ops = family->n_ops;
	int i, j;

	if (WARN_ON(n_ops && !ops))
		return -EINVAL;

	if (!n_ops)
		return 0;

	for (i = 0; i < n_ops; i++) {
		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
			return -EINVAL;
		for (j = i + 1; j < n_ops; j++)
			if (ops[i].cmd == ops[j].cmd)
				return -EINVAL;
	}

	return 0;
}

/**
 * __genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 * The family id may equal GENL_ID_GENERATE, causing a unique id to
 * be automatically generated and assigned.
 *
 * The family's ops array must already be assigned; you can use the
 * genl_register_family_with_ops() helper function.
 *
 * Return 0 on success or a negative error code.
 */
int __genl_register_family(struct genl_family *family)
{
	int err = -EINVAL, i;

	if (family->id && family->id < GENL_MIN_ID)
		goto errout;

	if (family->id > GENL_MAX_ID)
		goto errout;

	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	if (family->id == GENL_ID_GENERATE) {
		u16 newid = genl_generate_id();

		if (!newid) {
			err = -ENOMEM;
			goto errout_locked;
		}

		family->id = newid;
	} else if (genl_family_find_byid(family->id)) {
		err = -EEXIST;
		goto errout_locked;
	}

	if (family->maxattr && !family->parallel_ops) {
		family->attrbuf = kmalloc((family->maxattr+1) *
					  sizeof(struct nlattr *), GFP_KERNEL);
		if (family->attrbuf == NULL) {
			err = -ENOMEM;
			goto errout_locked;
		}
	} else
		family->attrbuf = NULL;

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_locked;

	list_add_tail(&family->family_list, genl_family_chain(family->id));
	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

errout_locked:
	genl_unlock_all();
errout:
	return err;
}
EXPORT_SYMBOL(__genl_register_family);

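/*
 * Illustrative registration sketch (not used by this file): shows how a
 * caller would typically set up a family and register it through the
 * genl_register_family_with_ops() helper mentioned above. All "foo"/"FOO"
 * identifiers below are hypothetical placeholders, not real kernel symbols.
 *
 *	static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
 *		[FOO_ATTR_VALUE] = { .type = NLA_U32 },
 *	};
 *
 *	static const struct genl_ops foo_ops[] = {
 *		{
 *			.cmd	= FOO_CMD_GET,
 *			.doit	= foo_get_doit,
 *			.policy	= foo_policy,
 *		},
 *	};
 *
 *	static struct genl_family foo_family = {
 *		.id	 = GENL_ID_GENERATE,
 *		.name	 = "foo",
 *		.version = 1,
 *		.maxattr = FOO_ATTR_MAX,
 *	};
 *
 *	err = genl_register_family_with_ops(&foo_family, foo_ops);
 *	if (err)
 *		goto out;
 */
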
/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(struct genl_family *family)
{
	struct genl_family *rc;

	genl_lock_all();

	genl_unregister_mc_groups(family);

	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
		if (family->id != rc->id || strcmp(rc->name, family->name))
			continue;

		list_del(&rc->family_list);
		family->n_ops = 0;
		genl_unlock_all();

		kfree(family->attrbuf);
		genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
		return 0;
	}

	genl_unlock_all();

	return -ENOENT;
}
EXPORT_SYMBOL(genl_unregister_family);

/**
 * genlmsg_new_unicast - Allocate generic netlink message for unicast
 * @payload: size of the message payload
 * @info: information on destination
 * @flags: the type of memory to allocate
 *
 * Allocates a new sk_buff large enough to cover the specified payload
 * plus required Netlink headers. Will check receiving socket for
 * memory mapped i/o capability and use it if enabled. Will fall back
 * to non-mapped skb if message size exceeds the frame size of the ring.
 */
struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
				    gfp_t flags)
{
	size_t len = nlmsg_total_size(genlmsg_total_size(payload));

	return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
}
EXPORT_SYMBOL_GPL(genlmsg_new_unicast);

/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  struct genl_family *family, int flags, u8 cmd)
{
	struct nlmsghdr *nlh;
	struct genlmsghdr *hdr;

	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
			family->hdrsize, flags);
	if (nlh == NULL)
		return NULL;

	hdr = nlmsg_data(nlh);
	hdr->cmd = cmd;
	hdr->version = family->version;
	hdr->reserved = 0;

	return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);

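/*
 * Illustrative sketch only: a typical ->doit() handler that replies to the
 * sender using genlmsg_new_unicast() and genlmsg_put() above. The "foo"/"FOO"
 * names are hypothetical placeholders and are not defined anywhere here.
 *
 *	static int foo_get_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		struct sk_buff *msg;
 *		void *hdr;
 *
 *		msg = genlmsg_new_unicast(nla_total_size(sizeof(u32)), info,
 *					  GFP_KERNEL);
 *		if (!msg)
 *			return -ENOMEM;
 *
 *		hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *				  &foo_family, 0, FOO_CMD_GET);
 *		if (!hdr || nla_put_u32(msg, FOO_ATTR_VALUE, 42)) {
 *			nlmsg_free(msg);
 *			return -EMSGSIZE;
 *		}
 *
 *		genlmsg_end(msg, hdr);
 *		return genlmsg_reply(msg, info);
 *	}
 */
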
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc;

	genl_lock();
	rc = ops->dumpit(skb, cb);
	genl_unlock();
	return rc;
}

static int genl_lock_done(struct netlink_callback *cb)
{
	/* our ops are always const - netlink API doesn't propagate that */
	const struct genl_ops *ops = cb->data;
	int rc = 0;

	if (ops->done) {
		genl_lock();
		rc = ops->done(cb);
		genl_unlock();
	}
	return rc;
}

static int genl_family_rcv_msg(struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh)
{
	const struct genl_ops *ops;
	struct net *net = sock_net(skb->sk);
	struct genl_info info;
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct nlattr **attrbuf;
	int hdrlen, err;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	ops = genl_get_cmd(hdr->cmd, family);
	if (ops == NULL)
		return -EOPNOTSUPP;

	if ((ops->flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
		int rc;

		if (ops->dumpit == NULL)
			return -EOPNOTSUPP;

		if (!family->parallel_ops) {
			struct netlink_dump_control c = {
				.module = family->module,
				/* we have const, but the netlink API doesn't */
				.data = (void *)ops,
				.dump = genl_lock_dumpit,
				.done = genl_lock_done,
			};

			genl_unlock();
			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
			genl_lock();

		} else {
			struct netlink_dump_control c = {
				.module = family->module,
				.dump = ops->dumpit,
				.done = ops->done,
			};

			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
		}

		return rc;
	}

	if (ops->doit == NULL)
		return -EOPNOTSUPP;

	if (family->maxattr && family->parallel_ops) {
		attrbuf = kmalloc((family->maxattr+1) *
				  sizeof(struct nlattr *), GFP_KERNEL);
		if (attrbuf == NULL)
			return -ENOMEM;
	} else
		attrbuf = family->attrbuf;

	if (attrbuf) {
		err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
				  ops->policy);
		if (err < 0)
			goto out;
	}

	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
	info.attrs = attrbuf;
	info.dst_sk = skb->sk;
	genl_info_net_set(&info, net);
	memset(&info.user_ptr, 0, sizeof(info.user_ptr));

	if (family->pre_doit) {
		err = family->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	if (family->post_doit)
		family->post_doit(ops, skb, &info);

out:
	if (family->parallel_ops)
		kfree(attrbuf);

	return err;
}

static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct genl_family *family;
	int err;

	family = genl_family_find_byid(nlh->nlmsg_type);
	if (family == NULL)
		return -ENOENT;

	if (!family->parallel_ops)
		genl_lock();

	err = genl_family_rcv_msg(family, skb, nlh);

	if (!family->parallel_ops)
		genl_unlock();

	return err;
}

static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl = {
	.id = GENL_ID_CTRL,
	.name = "nlctrl",
	.version = 0x2,
	.maxattr = CTRL_ATTR_MAX,
	.netnsok = true,
};

static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;

	if (family->n_ops) {
		struct nlattr *nla_ops;
		int i;

		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_ops; i++) {
			struct nlattr *nest;
			const struct genl_ops *ops = &family->ops[i];
			u32 op_flags = ops->flags;

			if (ops->dumpit)
				op_flags |= GENL_CMD_CAP_DUMP;
			if (ops->doit)
				op_flags |= GENL_CMD_CAP_DO;
			if (ops->policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}

	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;

		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			nest = nla_nest_start(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_fill_mcgrp_info(struct genl_family *family,
				const struct genl_multicast_group *grp,
				int grp_id, u32 portid, u32 seq, u32 flags,
				struct sk_buff *skb, u8 cmd)
{
	void *hdr;
	struct nlattr *nla_grps;
	struct nlattr *nest;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
		goto nla_put_failure;

	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
	if (nla_grps == NULL)
		goto nla_put_failure;

	nest = nla_nest_start(skb, 1);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
			   grp->name))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	nla_nest_end(skb, nla_grps);

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, n = 0;
	struct genl_family *rt;
	struct net *net = sock_net(skb->sk);
	int chains_to_skip = cb->args[0];
	int fams_to_skip = cb->args[1];

	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
		n = 0;
		list_for_each_entry(rt, genl_family_chain(i), family_list) {
			if (!rt->netnsok && !net_eq(net, &init_net))
				continue;
			if (++n < fams_to_skip)
				continue;
			if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   skb, CTRL_CMD_NEWFAMILY) < 0)
				goto errout;
		}

		fams_to_skip = 0;
	}

errout:
	cb->args[0] = i;
	cb->args[1] = n;

	return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
					     u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static struct sk_buff *
ctrl_build_mcgrp_msg(struct genl_family *family,
		     const struct genl_multicast_group *grp,
		     int grp_id, u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
				   seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
};

static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}

static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (!family->netnsok) {
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	} else {
		rcu_read_lock();
		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
					0, GFP_ATOMIC);
		rcu_read_unlock();
	}

	return 0;
}

static struct genl_ops genl_ctrl_ops[] = {
	{
		.cmd		= CTRL_CMD_GETFAMILY,
		.doit		= ctrl_getfamily,
		.dumpit		= ctrl_dumpfamily,
		.policy		= ctrl_policy,
	},
};

static struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};

static int __net_init genl_pernet_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input		= genl_rcv,
		.flags		= NL_CFG_F_NONROOT_RECV,
	};

	/* we'll bump the group number right afterwards */
	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

	if (!net->genl_sock && net_eq(net, &init_net))
		panic("GENL: Cannot initialize generic netlink\n");

	if (!net->genl_sock)
		return -ENOMEM;

	return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
	netlink_kernel_release(net->genl_sock);
	net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
	int i, err;

	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
		INIT_LIST_HEAD(&family_ht[i]);

	err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
						   genl_ctrl_groups);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

subsys_initcall(genl_init);

static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
			 gfp_t flags)
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
	int err;

	for_each_net_rcu(net) {
		if (prev) {
			tmp = skb_clone(skb, flags);
			if (!tmp) {
				err = -ENOMEM;
				goto error;
			}
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, flags);
			if (err)
				goto error;
		}

		prev = net;
	}

	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
error:
	kfree_skb(skb);
	return err;
}

int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
			    u32 portid, unsigned int group, gfp_t flags)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return -EINVAL;
	group = family->mcgrp_offset + group;
	return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

void genl_notify(struct genl_family *family,
		 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *sk = net->genl_sock;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return;
	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, portid, group, report, flags);
}
EXPORT_SYMBOL(genl_notify);
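
/*
 * Illustrative notification sketch (hypothetical "foo"/"FOO" names, not
 * defined here): how a family would typically build an event message and
 * deliver it to one of its multicast groups across all network namespaces
 * via genlmsg_multicast_allns(). Note that the group argument is the index
 * into the family's mcgrps[] array, not the global group ID; the offset is
 * added internally.
 *
 *	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	hdr = genlmsg_put(msg, 0, 0, &foo_family, 0, FOO_CMD_EVENT);
 *	if (!hdr || nla_put_u32(msg, FOO_ATTR_VALUE, val)) {
 *		nlmsg_free(msg);
 *		return -EMSGSIZE;
 *	}
 *
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_multicast_allns(&foo_family, msg, 0, FOO_MCGRP_EVENTS,
 *				       GFP_KERNEL);
 */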