#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
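
/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * intended consumer of net_alloc_generic()/net_assign_generic() is a
 * subsystem that registers pernet_operations with non-NULL .id and .size.
 * ops_init() below then kzalloc()s .size bytes of per-namespace state for
 * every net and publishes the pointer via net_assign_generic(); the
 * subsystem reads it back with net_generic():
 *
 *	struct foo_net {
 *		int some_counter;
 *	};
 *
 *	static unsigned int foo_net_id;
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	static struct foo_net *foo_pernet(struct net *net)
 *	{
 *		return net_generic(net, foo_net_id);
 *	}
 */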

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
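
/*
 * Illustrative sketch (hypothetical foo names again): a subsystem that
 * needs per-namespace setup and teardown supplies .init and .exit; one
 * that prefers to tear down all dying namespaces in one pass, e.g. under
 * a single lock, supplies .exit_batch and walks the exit_list itself,
 * which is exactly the list ops_exit_list() above hands to it:
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_counter = 0;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_exit_batch(struct list_head *net_exit_list)
 *	{
 *		struct net *net;
 *
 *		list_for_each_entry(net, net_exit_list, exit_list)
 *			foo_teardown(net);	(hypothetical helper)
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.id	    = &foo_net_id,
 *		.size	    = sizeof(struct foo_net),
 *		.init	    = foo_init_net,
 *		.exit_batch = foo_exit_batch,
 *	};
 */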

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool
 * alloc is set to true, thus the caller knows that the new id must be
 * notified via rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one
 * will be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, which guarantees
	 * that we never hash a peer back into net->netns_ids after
	 * it has just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}
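
/*
 * Illustrative sketch: a typical caller of peernet2id_alloc() is netlink
 * code that must describe an object whose peer lives in another namespace;
 * the id, allocated on first use, is what goes on the wire. This mirrors
 * how rtnetlink fills IFLA_LINK_NETNSID for cross-namespace devices:
 *
 *	int id = peernet2id_alloc(dev_net(dev), link_net);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 *
 * A receiver can resolve such an id back to a namespace with
 * get_net_ns_by_id() above, and must put_net() the result when done.
 */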

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}
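
/*
 * A descriptive aside on the two reference counts used above: net->count
 * counts live users (get_net()/put_net()); when it drops to zero the
 * namespace is queued for cleanup_net() via __put_net() below. net->passive
 * only keeps the struct net memory itself alive (net_drop_ns()) while
 * teardown may still dereference it. A lockless lookup from an RCU list
 * therefore pins a namespace roughly like this:
 *
 *	rcu_read_lock();
 *	net = lookup_on_rcu_list();		(hypothetical)
 *	if (net && !maybe_get_net(net))		(refuses count == 0)
 *		net = NULL;
 *	rcu_read_unlock();
 */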

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to clean up */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
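
/*
 * Putting the pieces together (descriptive aside): dropping the last user
 * reference never tears a namespace down synchronously. put_net() ends up
 * in __put_net() below, which queues the net on cleanup_list and kicks
 * netns_wq; cleanup_net() above then, in process context, unhashes the
 * namespace, runs the pernet ->exit()/->exit_batch() methods in reverse
 * registration order, waits out RCU, and finally drops the passive
 * reference so net_free() can run.
 */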
523 */ 524 void net_ns_barrier(void) 525 { 526 mutex_lock(&net_mutex); 527 mutex_unlock(&net_mutex); 528 } 529 EXPORT_SYMBOL(net_ns_barrier); 530 531 static DECLARE_WORK(net_cleanup_work, cleanup_net); 532 533 void __put_net(struct net *net) 534 { 535 /* Cleanup the network namespace in process context */ 536 unsigned long flags; 537 538 spin_lock_irqsave(&cleanup_list_lock, flags); 539 list_add(&net->cleanup_list, &cleanup_list); 540 spin_unlock_irqrestore(&cleanup_list_lock, flags); 541 542 queue_work(netns_wq, &net_cleanup_work); 543 } 544 EXPORT_SYMBOL_GPL(__put_net); 545 546 struct net *get_net_ns_by_fd(int fd) 547 { 548 struct file *file; 549 struct ns_common *ns; 550 struct net *net; 551 552 file = proc_ns_fget(fd); 553 if (IS_ERR(file)) 554 return ERR_CAST(file); 555 556 ns = get_proc_ns(file_inode(file)); 557 if (ns->ops == &netns_operations) 558 net = get_net(container_of(ns, struct net, ns)); 559 else 560 net = ERR_PTR(-EINVAL); 561 562 fput(file); 563 return net; 564 } 565 566 #else 567 struct net *get_net_ns_by_fd(int fd) 568 { 569 return ERR_PTR(-EINVAL); 570 } 571 #endif 572 EXPORT_SYMBOL_GPL(get_net_ns_by_fd); 573 574 struct net *get_net_ns_by_pid(pid_t pid) 575 { 576 struct task_struct *tsk; 577 struct net *net; 578 579 /* Lookup the network namespace */ 580 net = ERR_PTR(-ESRCH); 581 rcu_read_lock(); 582 tsk = find_task_by_vpid(pid); 583 if (tsk) { 584 struct nsproxy *nsproxy; 585 task_lock(tsk); 586 nsproxy = tsk->nsproxy; 587 if (nsproxy) 588 net = get_net(nsproxy->net_ns); 589 task_unlock(tsk); 590 } 591 rcu_read_unlock(); 592 return net; 593 } 594 EXPORT_SYMBOL_GPL(get_net_ns_by_pid); 595 596 static __net_init int net_ns_net_init(struct net *net) 597 { 598 #ifdef CONFIG_NET_NS 599 net->ns.ops = &netns_operations; 600 #endif 601 return ns_alloc_inum(&net->ns); 602 } 603 604 static __net_exit void net_ns_net_exit(struct net *net) 605 { 606 ns_free_inum(&net->ns); 607 } 608 609 static struct pernet_operations __net_initdata net_ns_ops = { 610 .init = net_ns_net_init, 611 .exit = net_ns_net_exit, 612 }; 613 614 static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = { 615 [NETNSA_NONE] = { .type = NLA_UNSPEC }, 616 [NETNSA_NSID] = { .type = NLA_S32 }, 617 [NETNSA_PID] = { .type = NLA_U32 }, 618 [NETNSA_FD] = { .type = NLA_U32 }, 619 }; 620 621 static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, 622 struct netlink_ext_ack *extack) 623 { 624 struct net *net = sock_net(skb->sk); 625 struct nlattr *tb[NETNSA_MAX + 1]; 626 struct nlattr *nla; 627 struct net *peer; 628 int nsid, err; 629 630 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, 631 rtnl_net_policy, extack); 632 if (err < 0) 633 return err; 634 if (!tb[NETNSA_NSID]) { 635 NL_SET_ERR_MSG(extack, "nsid is missing"); 636 return -EINVAL; 637 } 638 nsid = nla_get_s32(tb[NETNSA_NSID]); 639 640 if (tb[NETNSA_PID]) { 641 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); 642 nla = tb[NETNSA_PID]; 643 } else if (tb[NETNSA_FD]) { 644 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); 645 nla = tb[NETNSA_FD]; 646 } else { 647 NL_SET_ERR_MSG(extack, "Peer netns reference is missing"); 648 return -EINVAL; 649 } 650 if (IS_ERR(peer)) { 651 NL_SET_BAD_ATTR(extack, nla); 652 NL_SET_ERR_MSG(extack, "Peer netns reference is invalid"); 653 return PTR_ERR(peer); 654 } 655 656 spin_lock_bh(&net->nsid_lock); 657 if (__peernet2id(net, peer) >= 0) { 658 spin_unlock_bh(&net->nsid_lock); 659 err = -EEXIST; 660 NL_SET_BAD_ATTR(extack, nla); 661 NL_SET_ERR_MSG(extack, 662 "Peer netns already has a 

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Clean up the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
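
/*
 * Descriptive aside: like every netlink dump, rtnl_net_dumpid() may be
 * called several times for one userspace request when the ids do not fit
 * into a single skb. cb->args[0] carries the number of entries already
 * emitted, so a resumed call skips them:
 *
 *	first call:	s_idx = 0, fills the skb, idx ends at N
 *	second call:	s_idx = N, rtnl_net_dumpid_one() skips the first
 *			N entries and continues from there
 */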

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error, clean up all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID,
					  ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
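
/*
 * Illustrative sketch (hypothetical foo_net_ops as above): the canonical
 * module-side pairing is register on load, unregister on unload. On
 * failure, register_pernet_subsys() has already undone any per-namespace
 * state it managed to set up:
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */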

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace device
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
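
/*
 * Illustrative sketch (userspace side, not kernel code): netns_install()
 * above is what runs when a process calls setns(2) with CLONE_NEWNET on a
 * namespace file descriptor, for instance one pinned by "ip netns add
 * blue" under /var/run/netns:
 *
 *	int fd = open("/var/run/netns/blue", O_RDONLY);
 *
 *	if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0) {
 *		... the calling task's nsproxy->net_ns is now "blue",
 *		... subject to the CAP_SYS_ADMIN checks above
 *	}
 */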