#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
        .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define INITIAL_NET_GEN_PTRS    13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->len = max_gen_ptrs;

        return ng;
}

static int net_assign_generic(struct net *net, int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);

        old_ng = rcu_dereference_protected(net->gen,
                                           lockdep_is_held(&net_mutex));
        ng = old_ng;
        if (old_ng->len >= id)
                goto assign;

        ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() explores the net->gen array inside an RCU
         * read-side section. Besides, once set, the net->gen->ptr[x]
         * pointer never changes (see rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

        rcu_assign_pointer(net->gen, ng);
        kfree_rcu(old_ng, rcu);
assign:
        ng->ptr[id - 1] = data;
        return 0;
}
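
/*
 * Illustrative only (struct foo_net and foo_net_id are hypothetical): a
 * pernet user that registered with a non-NULL ->id reads its slot back
 * through net_generic(), which the rules above keep safe:
 *
 *      struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * The returned pointer is the kzalloc'ed area that ops_init() assigned
 * below and stays stable for the lifetime of the namespace.
 */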

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err = -ENOMEM;
        void *data = NULL;

        if (ops->id && ops->size) {
                data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        goto out;

                err = net_assign_generic(net, *ops->id, data);
                if (err)
                        goto cleanup;
        }
        err = 0;
        if (ops->init)
                err = ops->init(net);
        if (!err)
                return 0;

cleanup:
        kfree(data);

out:
        return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                int id = *ops->id;
                kfree(net_generic(net, id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
        int min = 0, max = 0;

        if (reqid >= 0) {
                min = reqid;
                max = reqid + 1;
        }

        return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}
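
/*
 * Illustrative only: with reqid == 5 the window passed to idr_alloc() is
 * [5, 6), so the call either hands back exactly 5 or fails with -ENOSPC;
 * with a negative reqid the window is [0, 0), which idr_alloc() treats
 * as "any currently free id >= 0".
 */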

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
        if (net_eq(net, peer))
                return id ? : NET_ID_ZERO;
        return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
        int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
        bool alloc_it = *alloc;

        *alloc = false;

        /* Magic value for id 0. */
        if (id == NET_ID_ZERO)
                return 0;
        if (id > 0)
                return id;

        if (alloc_it) {
                id = alloc_netid(net, peer, -1);
                *alloc = true;
                return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
        }

        return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
        bool no = false;

        return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
        unsigned long flags;
        bool alloc;
        int id;

        spin_lock_irqsave(&net->nsid_lock, flags);
        alloc = atomic_read(&peer->count) == 0 ? false : true;
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_irqrestore(&net->nsid_lock, flags);
        if (alloc && id >= 0)
                rtnl_net_notifyid(net, RTM_NEWNSID, id);
        return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
        unsigned long flags;
        int id;

        spin_lock_irqsave(&net->nsid_lock, flags);
        id = __peernet2id(net, peer);
        spin_unlock_irqrestore(&net->nsid_lock, flags);
        return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
        return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
        unsigned long flags;
        struct net *peer;

        if (id < 0)
                return NULL;

        rcu_read_lock();
        spin_lock_irqsave(&net->nsid_lock, flags);
        peer = idr_find(&net->netns_ids, id);
        if (peer)
                get_net(peer);
        spin_unlock_irqrestore(&net->nsid_lock, flags);
        rcu_read_unlock();

        return peer;
}
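
/*
 * Illustrative only: a successful lookup transfers a reference to the
 * caller, who must drop it when done:
 *
 *      struct net *peer = get_net_ns_by_id(net, nsid);
 *
 *      if (peer) {
 *              ...
 *              put_net(peer);
 *      }
 */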

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
        /* Must be called with net_mutex held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        atomic_set(&net->count, 1);
        atomic_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
        spin_lock_init(&net->nsid_lock);

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}


#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
        return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
        kfree(rcu_access_pointer(net->gen));
        kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
        struct net *ns = p;
        if (ns && atomic_dec_and_test(&ns->passive))
                net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
                        struct user_namespace *user_ns, struct net *old_net)
{
        struct ucounts *ucounts;
        struct net *net;
        int rv;

        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);

        ucounts = inc_net_namespaces(user_ns);
        if (!ucounts)
                return ERR_PTR(-ENOSPC);

        net = net_alloc();
        if (!net) {
                dec_net_namespaces(ucounts);
                return ERR_PTR(-ENOMEM);
        }

        get_user_ns(user_ns);

        mutex_lock(&net_mutex);
        net->ucounts = ucounts;
        rv = setup_net(net, user_ns);
        if (rv == 0) {
                rtnl_lock();
                list_add_tail_rcu(&net->list, &net_namespace_list);
                rtnl_unlock();
        }
        mutex_unlock(&net_mutex);
        if (rv < 0) {
                dec_net_namespaces(ucounts);
                put_user_ns(user_ns);
                net_drop_ns(net);
                return ERR_PTR(rv);
        }
        return net;
}
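
/*
 * Illustrative only: userspace normally reaches copy_net_ns() through
 * copy_namespaces(), e.g.
 *
 *      unshare(CLONE_NEWNET);          (or clone(2) with CLONE_NEWNET)
 *
 * after which /proc/self/ns/net refers to the freshly set-up namespace.
 */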

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp;
        struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
                for_each_net(tmp) {
                        int id;

                        spin_lock_irq(&tmp->nsid_lock);
                        id = __peernet2id(tmp, net);
                        if (id >= 0)
                                idr_remove(&tmp->netns_ids, id);
                        spin_unlock_irq(&tmp->nsid_lock);
                        if (id >= 0)
                                rtnl_net_notifyid(tmp, RTM_DELNSID, id);
                }
                spin_lock_irq(&net->nsid_lock);
                idr_destroy(&net->netns_ids);
                spin_unlock_irq(&net->nsid_lock);
        }
        rtnl_unlock();

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        mutex_unlock(&net_mutex);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                dec_net_namespaces(net->ucounts);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
        struct file *file;
        struct ns_common *ns;
        struct net *net;

        file = proc_ns_fget(fd);
        if (IS_ERR(file))
                return ERR_CAST(file);

        ns = get_proc_ns(file_inode(file));
        if (ns->ops == &netns_operations)
                net = get_net(container_of(ns, struct net, ns));
        else
                net = ERR_PTR(-EINVAL);

        fput(file);
        return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
        return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                task_lock(tsk);
                nsproxy = tsk->nsproxy;
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
                task_unlock(tsk);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
        net->ns.ops = &netns_operations;
#endif
        return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
        ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
        .init = net_ns_net_init,
        .exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
        [NETNSA_NONE]           = { .type = NLA_UNSPEC },
        [NETNSA_NSID]           = { .type = NLA_S32 },
        [NETNSA_PID]            = { .type = NLA_U32 },
        [NETNSA_FD]             = { .type = NLA_U32 },
};
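
/*
 * Illustrative only: iproute2's "ip netns set NAME NETNSID" is the usual
 * source of these messages; it sends an RTM_NEWNSID request identifying
 * the peer via NETNSA_FD (or NETNSA_PID) together with the requested
 * NETNSA_NSID, which rtnl_net_newid() below services.
 */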

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        unsigned long flags;
        struct net *peer;
        int nsid, err;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy);
        if (err < 0)
                return err;
        if (!tb[NETNSA_NSID])
                return -EINVAL;
        nsid = nla_get_s32(tb[NETNSA_NSID]);

        if (tb[NETNSA_PID])
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
        else if (tb[NETNSA_FD])
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
        else
                return -EINVAL;
        if (IS_ERR(peer))
                return PTR_ERR(peer);

        spin_lock_irqsave(&net->nsid_lock, flags);
        if (__peernet2id(net, peer) >= 0) {
                spin_unlock_irqrestore(&net->nsid_lock, flags);
                err = -EEXIST;
                goto out;
        }

        err = alloc_netid(net, peer, nsid);
        spin_unlock_irqrestore(&net->nsid_lock, flags);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err);
                err = 0;
        }
out:
        put_net(peer);
        return err;
}

static int rtnl_net_get_size(void)
{
        return NLMSG_ALIGN(sizeof(struct rtgenmsg))
               + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
               ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                         int cmd, struct net *net, int nsid)
{
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;

        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
        if (!nlh)
                return -EMSGSIZE;

        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;

        if (nla_put_s32(skb, NETNSA_NSID, nsid))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct sk_buff *msg;
        struct net *peer;
        int err, id;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy);
        if (err < 0)
                return err;
        if (tb[NETNSA_PID])
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
        else if (tb[NETNSA_FD])
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
        else
                return -EINVAL;

        if (IS_ERR(peer))
                return PTR_ERR(peer);

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out;
        }

        id = peernet2id(net, peer);
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                            RTM_NEWNSID, net, id);
        if (err < 0)
                goto err_out;

        err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
        goto out;

err_out:
        nlmsg_free(msg);
out:
        put_net(peer);
        return err;
}

struct rtnl_net_dump_cb {
        struct net *net;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
        int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
        struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
        int ret;

        if (net_cb->idx < net_cb->s_idx)
                goto cont;

        ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
                            net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                            RTM_NEWNSID, net_cb->net, id);
        if (ret < 0)
                return ret;

cont:
        net_cb->idx++;
        return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct rtnl_net_dump_cb net_cb = {
                .net = net,
                .skb = skb,
                .cb = cb,
                .idx = 0,
                .s_idx = cb->args[0],
        };
        unsigned long flags;

        spin_lock_irqsave(&net->nsid_lock, flags);
        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
        spin_unlock_irqrestore(&net->nsid_lock, flags);

        cb->args[0] = net_cb.idx;
        return skb->len;
}
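
/*
 * Illustrative only: when the filled entries overflow one skb, the
 * netlink core re-invokes rtnl_net_dumpid() for the next message of the
 * dump. cb->args[0] carries how far the previous pass got, so entries
 * with idx < s_idx are skipped and the dump resumes where it stopped.
 */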

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
        struct sk_buff *msg;
        int err = -ENOMEM;

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg)
                goto out;

        err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
        if (err < 0)
                goto err_out;

        rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
        return;

err_out:
        nlmsg_free(msg);
out:
        rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        mutex_lock(&net_mutex);
        if (setup_net(&init_net, &init_user_ns))
                panic("Could not setup the initial network namespace");

        init_net_initialized = true;

        rtnl_lock();
        list_add_tail_rcu(&init_net.list, &net_namespace_list);
        rtnl_unlock();

        mutex_unlock(&net_mutex);

        register_pernet_subsys(&net_ns_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
                      NULL);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_add_tail(&ops->list, list);
                return 0;
        }

        return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_del(&ops->list);
        } else {
                LIST_HEAD(net_exit_list);
                list_add(&init_net.exit_list, &net_exit_list);
                ops_exit_list(ops, &net_exit_list);
                ops_free_list(ops, &net_exit_list);
        }
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);
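
/*
 * Illustrative only: ids come out of net_generic_ids starting at 1 (id 0
 * is reserved, see the BUG_ON() in net_assign_generic()), so the first
 * registration that passes a non-NULL ops->id observes *ops->id == 1 and
 * its data lands in net->gen->ptr[0].
 */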

static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
again:
                error = ida_get_new_above(&net_generic_ids, 1, ops->id);
                if (error < 0) {
                        if (error == -EAGAIN) {
                                ida_pre_get(&net_generic_ids, GFP_KERNEL);
                                goto again;
                        }
                        return error;
                }
                max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_remove(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{

        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(first_device, ops);
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
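
/*
 * Illustrative only (all foo_* names are hypothetical): a minimal
 * subsystem that keeps per-namespace state would look like
 *
 *      struct foo_net {
 *              int some_state;
 *      };
 *
 *      static int foo_net_id;
 *
 *      static int __net_init foo_init_net(struct net *net)
 *      {
 *              struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *              fn->some_state = 1;     (the area arrives zeroed)
 *              return 0;
 *      }
 *
 *      static struct pernet_operations foo_net_ops = {
 *              .init = foo_init_net,
 *              .id   = &foo_net_id,
 *              .size = sizeof(struct foo_net),
 *      };
 *
 * and call register_pernet_subsys(&foo_net_ops) from its module init.
 */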

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
        struct net *net = NULL;
        struct nsproxy *nsproxy;

        task_lock(task);
        nsproxy = task->nsproxy;
        if (nsproxy)
                net = get_net(nsproxy->net_ns);
        task_unlock(task);

        return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
        return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
        put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct net *net = to_net_ns(ns);

        if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        put_net(nsproxy->net_ns);
        nsproxy->net_ns = get_net(net);
        return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
        return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
        .name           = "net",
        .type           = CLONE_NEWNET,
        .get            = netns_get,
        .put            = netns_put,
        .install        = netns_install,
        .owner          = netns_owner,
};
#endif
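
/*
 * Illustrative only: netns_install() is what runs when a task calls
 * setns(2) on a network namespace file descriptor, e.g.
 *
 *      int fd = open("/var/run/netns/foo", O_RDONLY);  (path hypothetical)
 *
 *      setns(fd, CLONE_NEWNET);
 */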