#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= ATOMIC_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section.  Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
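/*
 * Illustrative sketch (not part of this file): the reader side of the
 * scheme described above.  The "foo" identifiers are hypothetical; a
 * pernet subsystem looks its private data back up with net_generic(),
 * which performs the RCU dereference of net->gen internally (see
 * <net/netns/generic.h>).
 */
#if 0
static unsigned int foo_net_id;		/* filled in at registration time */

struct foo_net {
	int example_field;		/* whatever per-netns state "foo" keeps */
};

static struct foo_net *foo_pernet(struct net *net)
{
	/* No extra locking needed: ptr[id] never changes once set. */
	return net_generic(net, foo_net_id);
}
#endif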
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}
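/*
 * Worked example of the NET_ID_ZERO dance above (a note, not part of
 * this file): if a peer is stored under id 0, net_eq_idr() cannot hand
 * 0 back to idr_for_each() (iteration would continue), so it reports
 * NET_ID_ZERO (-1) instead, and __peernet2id_alloc() translates that
 * magic value back into the real id 0 before returning.
 */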
static void rtnl_net_notifyid(struct net *net, int cmd, int id);

/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
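/*
 * Illustrative sketch (not part of this file): copy_net_ns() is reached
 * from userspace via clone(2) or unshare(2) with CLONE_NEWNET, e.g.:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	if (unshare(CLONE_NEWNET) < 0)
 *		perror("unshare");
 *
 * After a successful unshare() the calling task owns a fresh namespace
 * that went through setup_net() above.
 */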
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}
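/*
 * Wire-format note (a sketch, not part of this file): the reply built
 * by rtnl_net_fill() above is simply
 *
 *	struct nlmsghdr		RTM_NEWNSID
 *	struct rtgenmsg		rtgen_family = AF_UNSPEC
 *	struct nlattr		NETNSA_NSID (s32)
 *
 * which is exactly what rtnl_net_get_size() accounts for.  From
 * userspace these handlers back, e.g., iproute2's
 * "ip netns set <name> <nsid>" (RTM_NEWNSID) and "ip netns list-id"
 * (RTM_GETNSID dump), assuming an iproute2 recent enough to know about
 * nsids.
 */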
struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids,
					  MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
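/*
 * Illustrative sketch (not part of this file): a minimal module using
 * the API above.  All "bar" identifiers are hypothetical.  Because .id
 * and .size are set, ops_init() kzalloc()s one struct bar_net per
 * namespace and stores it via net_assign_generic(); bar_net_id itself
 * is assigned from net_generic_ids by register_pernet_operations().
 */
#if 0
static unsigned int bar_net_id;

struct bar_net {
	struct list_head entries;	/* per-namespace private state */
};

static int __net_init bar_net_init(struct net *net)
{
	struct bar_net *bn = net_generic(net, bar_net_id);

	INIT_LIST_HEAD(&bn->entries);
	return 0;
}

static void __net_exit bar_net_exit(struct net *net)
{
	/* per-namespace teardown; the bar_net itself is freed by ops_free() */
}

static struct pernet_operations bar_net_ops = {
	.init = bar_net_init,
	.exit = bar_net_exit,
	.id   = &bar_net_id,
	.size = sizeof(struct bar_net),
};

static int __init bar_init(void)
{
	return register_pernet_subsys(&bar_net_ops);
}

static void __exit bar_exit(void)
{
	unregister_pernet_subsys(&bar_net_ops);
}
#endif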
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace device
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
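/*
 * Illustrative sketch (not part of this file): netns_install() above is
 * what ultimately runs when userspace switches namespaces via setns(2)
 * on a /proc/<pid>/ns/net file descriptor, e.g.:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/proc/1/ns/net", O_RDONLY);
 *	if (fd >= 0 && setns(fd, CLONE_NEWNET) < 0)
 *		perror("setns");
 *
 * The two CAP_SYS_ADMIN checks in netns_install() are why this needs
 * privilege over both the target netns's owning user namespace and the
 * caller's own user namespace.
 */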