#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */
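	/* Only the first old_ng->len pointer slots are copied below; the
	 * new slots past that remain NULL thanks to kzalloc() in
	 * net_alloc_generic().
	 */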
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;
		kfree(net_generic(net, id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	unsigned long flags;
	bool alloc;
	int id;

	spin_lock_irqsave(&net->nsid_lock, flags);
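	/* If the peer's refcount has already dropped to zero it is on its
	 * way to destruction: only look up an existing id, never allocate
	 * a fresh one for it.
	 */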
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
EXPORT_SYMBOL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&net->nsid_lock, flags);
	id = __peernet2id(net, peer);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	return id;
}

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	unsigned long flags;
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_irqsave(&net->nsid_lock, flags);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	rcu_read_unlock();

	return peer;
}

static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
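	/* The failed net has not been published on net_namespace_list yet,
	 * so this undo path only has to unwind the ops whose init
	 * succeeded; the rcu_barrier() below waits for any RCU callbacks
	 * those exit handlers may have posted.
	 */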
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENFILE);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_irq(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_irq(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_irq(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_irq(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};
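
/*
 * Illustrative only: rtnl_net_newid() below services RTM_NEWNSID requests
 * carrying NETNSA_NSID plus either NETNSA_PID or NETNSA_FD to identify the
 * peer namespace. This appears to be what e.g. iproute2 sends for
 *
 *	ip netns set <name> <nsid>
 */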

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	unsigned long flags;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_irqsave(&net->nsid_lock, flags);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_irqrestore(&net->nsid_lock, flags);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};
	unsigned long flags;

	spin_lock_irqsave(&net->nsid_lock, flags);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_irqrestore(&net->nsid_lock, flags);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
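
/*
 * Note on the dump above: cb->args[0] persists between invocations of
 * rtnl_net_dumpid(), so when the skb fills up and netlink calls back,
 * rtnl_net_dumpid_one() skips the entries already emitted (idx < s_idx).
 */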

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}
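
/*
 * Note: the rcu_barrier() in the error path above, and in
 * unregister_pernet_operations() below, waits for outstanding RCU
 * callbacks that may still reference the ops' per-net data before the
 * generic id is handed back to the ida for reuse.
 */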

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
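
/*
 * Illustrative usage sketch (not part of this file; foo_net, foo_net_id
 * and foo_net_ops are hypothetical names): a caller pairs ops->id and
 * ops->size with net_generic() to keep private per-namespace state,
 * which ops_init() above kzalloc()s and registers for each net:
 *
 *	static int foo_net_id;
 *
 *	struct foo_net {
 *		int some_state;
 *	};
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_state = 0;	// storage already zeroed by ops_init()
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	register_pernet_subsys(&foo_net_ops);
 */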

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif
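
/*
 * Illustrative userspace counterpart (hypothetical snippet): the
 * netns_install() hook above runs when a process calls setns(2) on a
 * network namespace file descriptor:
 *
 *	int fd = open("/proc/self/ns/net", O_RDONLY);
 *
 *	if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0)
 *		;	// now in the namespace referenced by fd
 */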