#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	unsigned long flags;
	bool alloc;
	int id;

	spin_lock_irqsave(&net->nsid_lock, flags);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&net->nsid_lock, flags);
	id = __peernet2id(net, peer);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	unsigned long flags;
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_irqsave(&net->nsid_lock, flags);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}


#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_irq(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_irq(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_irq(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_irq(&net->nsid_lock);

	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	unsigned long flags;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_irqsave(&net->nsid_lock, flags);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_irqrestore(&net->nsid_lock, flags);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};
	unsigned long flags;

	spin_lock_irqsave(&net->nsid_lock, flags);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_irqrestore(&net->nsid_lock, flags);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{

	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
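
/*
 * Illustrative sketch (not part of this file): how a subsystem typically
 * uses the pernet machinery above.  Registering pernet_operations with
 * .id and .size makes ops_init() kzalloc a per-namespace blob and store
 * it via net_assign_generic(); the subsystem then fetches that blob with
 * net_generic().  "foo_net_id", "struct foo_net" and the foo_ functions
 * below are hypothetical names, not existing kernel symbols.
 *
 *	static int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		int some_state;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_state = 0;	// blob was zeroed by ops_init()
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		// release per-namespace resources here; the blob itself
 *		// is freed by ops_free() after the exit methods run
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	// module init: register_pernet_subsys(&foo_net_ops);
 *	// module exit: unregister_pernet_subsys(&foo_net_ops);
 */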