#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
        .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS   13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->len = max_gen_ptrs;

        return ng;
}

static int net_assign_generic(struct net *net, int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);

        old_ng = rcu_dereference_protected(net->gen,
                                           lockdep_is_held(&net_mutex));
        ng = old_ng;
        if (old_ng->len >= id)
                goto assign;

        ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() walks the net->gen array inside an RCU read-side
         * section. Moreover, once set, the net->gen->ptr[x] pointer never
         * changes (see the rules in netns/generic.h).
         *
         * Given that, we simply duplicate this array and schedule the old
         * copy for kfree after a grace period.
         */

        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

        rcu_assign_pointer(net->gen, ng);
        kfree_rcu(old_ng, rcu);
assign:
        ng->ptr[id - 1] = data;
        return 0;
}
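/*
 * Illustrative sketch (not code from this file): a pernet subsystem that
 * registers with { .id = &foo_net_id, .size = sizeof(struct foo_net) }
 * has its slot populated via ops_init()/net_assign_generic() below, and
 * would later fetch it with:
 *
 *      struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * "foo_net_id" and "struct foo_net" are hypothetical names used only to
 * show the shape of the API.
 */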
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err = -ENOMEM;
        void *data = NULL;

        if (ops->id && ops->size) {
                data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        goto out;

                err = net_assign_generic(net, *ops->id, data);
                if (err)
                        goto cleanup;
        }
        err = 0;
        if (ops->init)
                err = ops->init(net);
        if (!err)
                return 0;

cleanup:
        kfree(data);

out:
        return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                int id = *ops->id;

                kfree(net_generic(net, id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;

        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;

        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}

static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
                              int id);

static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
        int min = 0, max = 0, id;

        ASSERT_RTNL();

        if (reqid >= 0) {
                min = reqid;
                max = reqid + 1;
        }

        id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
        if (id >= 0)
                rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);

        return id;
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
        if (net_eq(net, peer))
                return id ? : NET_ID_ZERO;
        return 0;
}

static int __peernet2id(struct net *net, struct net *peer, bool alloc)
{
        int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

        ASSERT_RTNL();

        /* Magic value for id 0. */
        if (id == NET_ID_ZERO)
                return 0;
        if (id > 0)
                return id;

        if (alloc) {
                id = alloc_netid(net, peer, -1);
                return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
        }

        return NETNSA_NSID_NOT_ASSIGNED;
}

/* This function returns the id of a peer netns. If no id is assigned, one
 * will be allocated and returned, unless the peer is already dying
 * (its refcount has reached zero).
 */
int peernet2id(struct net *net, struct net *peer)
{
        bool alloc = atomic_read(&peer->count) != 0;

        return __peernet2id(net, peer, alloc);
}
EXPORT_SYMBOL(peernet2id);

struct net *get_net_ns_by_id(struct net *net, int id)
{
        struct net *peer;

        if (id < 0)
                return NULL;

        rcu_read_lock();
        peer = idr_find(&net->netns_ids, id);
        if (peer)
                get_net(peer);
        rcu_read_unlock();

        return peer;
}
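/*
 * Usage sketch for the nsid helpers above (illustrative only; assumes
 * the caller takes the rtnl lock, which __peernet2id() asserts):
 *
 *      rtnl_lock();
 *      id = peernet2id(net, peer);             // may allocate a new nsid
 *      rtnl_unlock();
 *      peer2 = get_net_ns_by_id(net, id);      // takes a reference
 *      if (peer2)
 *              put_net(peer2);
 */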
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
        /* Must be called with net_mutex held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        atomic_set(&net->count, 1);
        atomic_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
        kfree(rcu_access_pointer(net->gen));
        kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
        struct net *ns = p;

        if (ns && atomic_dec_and_test(&ns->passive))
                net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
                        struct user_namespace *user_ns, struct net *old_net)
{
        struct net *net;
        int rv;

        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);

        net = net_alloc();
        if (!net)
                return ERR_PTR(-ENOMEM);

        get_user_ns(user_ns);

        mutex_lock(&net_mutex);
        rv = setup_net(net, user_ns);
        if (rv == 0) {
                rtnl_lock();
                list_add_tail_rcu(&net->list, &net_namespace_list);
                rtnl_unlock();
        }
        mutex_unlock(&net_mutex);
        if (rv < 0) {
                put_user_ns(user_ns);
                net_drop_ns(net);
                return ERR_PTR(rv);
        }
        return net;
}
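/*
 * Lifetime note: a struct net carries two counters. "count" tracks
 * active users (get_net()/put_net()); when it drops to zero, __put_net()
 * below queues cleanup_net(). "passive" keeps the memory itself alive
 * for lockless readers; net_drop_ns() frees the structure only once it
 * reaches zero. A minimal sketch of the active side:
 *
 *      struct net *net = get_net(other);       // count++
 *      ...use net...
 *      put_net(net);                           // count--, may queue cleanup
 */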
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp;
        struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
                for_each_net(tmp) {
                        int id = __peernet2id(tmp, net, false);

                        if (id >= 0) {
                                rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
                                idr_remove(&tmp->netns_ids, id);
                        }
                }
                idr_destroy(&net->netns_ids);
        }
        rtnl_unlock();

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        mutex_unlock(&net_mutex);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
        struct file *file;
        struct ns_common *ns;
        struct net *net;

        file = proc_ns_fget(fd);
        if (IS_ERR(file))
                return ERR_CAST(file);

        ns = get_proc_ns(file_inode(file));
        if (ns->ops == &netns_operations)
                net = get_net(container_of(ns, struct net, ns));
        else
                net = ERR_PTR(-EINVAL);

        fput(file);
        return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
        return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;

                task_lock(tsk);
                nsproxy = tsk->nsproxy;
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
                task_unlock(tsk);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
        net->ns.ops = &netns_operations;
#endif
        return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
        ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
        .init = net_ns_net_init,
        .exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
        [NETNSA_NONE]   = { .type = NLA_UNSPEC },
        [NETNSA_NSID]   = { .type = NLA_S32 },
        [NETNSA_PID]    = { .type = NLA_U32 },
        [NETNSA_FD]     = { .type = NLA_U32 },
};
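/*
 * The handlers below implement the RTM_NEWNSID and RTM_GETNSID
 * rtnetlink messages. For context (describing typical userspace, not
 * anything defined in this file): iproute2's "ip netns set NAME NSID"
 * sends an RTM_NEWNSID request carrying NETNSA_NSID plus NETNSA_FD or
 * NETNSA_PID to identify the peer namespace.
 */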
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct net *peer;
        int nsid, err;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy);
        if (err < 0)
                return err;
        if (!tb[NETNSA_NSID])
                return -EINVAL;
        nsid = nla_get_s32(tb[NETNSA_NSID]);

        if (tb[NETNSA_PID])
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
        else if (tb[NETNSA_FD])
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
        else
                return -EINVAL;
        if (IS_ERR(peer))
                return PTR_ERR(peer);

        if (__peernet2id(net, peer, false) >= 0) {
                err = -EEXIST;
                goto out;
        }

        err = alloc_netid(net, peer, nsid);
        if (err > 0)
                err = 0;
out:
        put_net(peer);
        return err;
}

static int rtnl_net_get_size(void)
{
        return NLMSG_ALIGN(sizeof(struct rtgenmsg))
               + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
               ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                         int cmd, struct net *net, struct net *peer,
                         int nsid)
{
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;
        int id;

        ASSERT_RTNL();

        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
        if (!nlh)
                return -EMSGSIZE;

        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;

        if (nsid >= 0)
                id = nsid;
        else
                id = __peernet2id(net, peer, false);
        if (nla_put_s32(skb, NETNSA_NSID, id))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct sk_buff *msg;
        struct net *peer;
        int err;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy);
        if (err < 0)
                return err;
        if (tb[NETNSA_PID])
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
        else if (tb[NETNSA_FD])
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
        else
                return -EINVAL;

        if (IS_ERR(peer))
                return PTR_ERR(peer);

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out;
        }

        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                            RTM_GETNSID, net, peer, -1);
        if (err < 0)
                goto err_out;

        err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
        goto out;

err_out:
        nlmsg_free(msg);
out:
        put_net(peer);
        return err;
}

struct rtnl_net_dump_cb {
        struct net *net;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
        int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
        struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
        int ret;

        if (net_cb->idx < net_cb->s_idx)
                goto cont;

        ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
                            net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                            RTM_NEWNSID, net_cb->net, peer, id);
        if (ret < 0)
                return ret;

cont:
        net_cb->idx++;
        return 0;
}
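/*
 * Note on idx/s_idx: netlink dumps can be interrupted and resumed, so
 * rtnl_net_dumpid() below records how many entries have already been
 * emitted in cb->args[0], and rtnl_net_dumpid_one() skips that many
 * entries on the next invocation. This is the usual rtnetlink
 * dump-resume convention.
 */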
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct rtnl_net_dump_cb net_cb = {
                .net = net,
                .skb = skb,
                .cb = cb,
                .idx = 0,
                .s_idx = cb->args[0],
        };

        ASSERT_RTNL();

        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);

        cb->args[0] = net_cb.idx;
        return skb->len;
}

static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
                              int id)
{
        struct sk_buff *msg;
        int err = -ENOMEM;

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg)
                goto out;

        err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
        if (err < 0)
                goto err_out;

        rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
        return;

err_out:
        nlmsg_free(msg);
out:
        rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        mutex_lock(&net_mutex);
        if (setup_net(&init_net, &init_user_ns))
                panic("Could not setup the initial network namespace");

        rtnl_lock();
        list_add_tail_rcu(&init_net.list, &net_namespace_list);
        rtnl_unlock();

        mutex_unlock(&net_mutex);

        register_pernet_subsys(&net_ns_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
                      NULL);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        LIST_HEAD(net_exit_list);

        list_add(&init_net.exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);
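/*
 * register_pernet_operations() below uses the two-step IDA allocation
 * idiom of this API generation: ida_get_new_above() may fail with
 * -EAGAIN when no preallocated memory is available, in which case the
 * caller preallocates with ida_pre_get() and retries. A generic sketch
 * of the pattern ("my_ids" and "id" are hypothetical names):
 *
 *      again:
 *              error = ida_get_new_above(&my_ids, 1, &id);
 *              if (error == -EAGAIN) {
 *                      if (!ida_pre_get(&my_ids, GFP_KERNEL))
 *                              return -ENOMEM;
 *                      goto again;
 *              }
 */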
static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
again:
                error = ida_get_new_above(&net_generic_ids, 1, ops->id);
                if (error < 0) {
                        if (error == -EAGAIN) {
                                ida_pre_get(&net_generic_ids, GFP_KERNEL);
                                goto again;
                        }
                        return error;
                }
                max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_remove(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created, all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed, all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;

        mutex_lock(&net_mutex);
        error = register_pernet_operations(first_device, ops);
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition, run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
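/*
 * Illustrative example of a subsystem registration (hypothetical names,
 * not taken from this file):
 *
 *      static int __net_init foo_init_net(struct net *net)
 *      {
 *              struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *              fn->counter = 0;        // set up per-namespace state
 *              return 0;
 *      }
 *
 *      static void __net_exit foo_exit_net(struct net *net)
 *      {
 *              // release whatever foo_init_net() set up
 *      }
 *
 *      static struct pernet_operations foo_net_ops = {
 *              .init = foo_init_net,
 *              .exit = foo_exit_net,
 *              .id   = &foo_net_id,
 *              .size = sizeof(struct foo_net),
 *      };
 *
 *      err = register_pernet_subsys(&foo_net_ops);
 */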
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created, all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed, all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;

        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace device
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition, run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
        struct net *net = NULL;
        struct nsproxy *nsproxy;

        task_lock(task);
        nsproxy = task->nsproxy;
        if (nsproxy)
                net = get_net(nsproxy->net_ns);
        task_unlock(task);

        return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
        return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
        put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct net *net = to_net_ns(ns);

        if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        put_net(nsproxy->net_ns);
        nsproxy->net_ns = get_net(net);
        return 0;
}

const struct proc_ns_operations netns_operations = {
        .name           = "net",
        .type           = CLONE_NEWNET,
        .get            = netns_get,
        .put            = netns_put,
        .install        = netns_install,
};
#endif
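/*
 * netns_operations above is what lets setns(2) move a task into an
 * existing network namespace. A userspace sketch (assumes the usual
 * procfs layout; "<pid>" is a placeholder):
 *
 *      int fd = open("/proc/<pid>/ns/net", O_RDONLY);
 *      if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0)
 *              ;       // now in the target netns, via netns_install()
 *      close(fd);
 */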