#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
        .count          = REFCOUNT_INIT(1),
        .dev_base_head  = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID       \
        ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS    13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->s.len = max_gen_ptrs;

        return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id < MIN_PERNET_OPS_ID);

        old_ng = rcu_dereference_protected(net->gen,
                                           lockdep_is_held(&net_mutex));
        if (old_ng->s.len > id) {
                old_ng->ptr[id] = data;
                return 0;
        }

        ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() explores the net->gen array inside an RCU
         * read-side section. Moreover, once set, the net->gen->ptr[x]
         * pointer never changes (see the rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
               (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
        ng->ptr[id] = data;

        rcu_assign_pointer(net->gen, ng);
        kfree_rcu(old_ng, s.rcu);
        return 0;
}
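/*
 * Reader side, for context: a sketch of how net_generic() (the lookup
 * counterpart in <net/netns/generic.h>) is expected to pair with the
 * copy-and-publish done above. This is an illustrative paraphrase, not
 * a definition belonging to this file:
 *
 *      rcu_read_lock();
 *      ng = rcu_dereference(net->gen); // sees either old or new array
 *      ptr = ng->ptr[id];              // ptr[id] never changes once set
 *      rcu_read_unlock();
 *
 * Because readers only ever observe a fully-initialised array (published
 * with rcu_assign_pointer()) and slots are write-once, the old array can
 * safely be reclaimed with kfree_rcu() after a grace period.
 */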
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err = -ENOMEM;
        void *data = NULL;

        if (ops->id && ops->size) {
                data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        goto out;

                err = net_assign_generic(net, *ops->id, data);
                if (err)
                        goto cleanup;
        }
        err = 0;
        if (ops->init)
                err = ops->init(net);
        if (!err)
                return 0;

cleanup:
        kfree(data);

out:
        return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                kfree(net_generic(net, *ops->id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
        int min = 0, max = 0;

        if (reqid >= 0) {
                min = reqid;
                max = reqid + 1;
        }

        return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
        if (net_eq(net, peer))
                return id ? : NET_ID_ZERO;
        return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
        int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
        bool alloc_it = *alloc;

        *alloc = false;

        /* Magic value for id 0. */
        if (id == NET_ID_ZERO)
                return 0;
        if (id > 0)
                return id;

        if (alloc_it) {
                id = alloc_netid(net, peer, -1);
                *alloc = true;
                return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
        }

        return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
        bool no = false;

        return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
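/*
 * A typical consumer of these ids, for orientation: when rtnetlink
 * describes a device whose link partner lives in another namespace, it
 * translates the peer netns into a local nsid and emits it as an
 * attribute. A simplified, hypothetical sketch of such a caller (the
 * real call site lives in net/core/rtnetlink.c):
 *
 *      int nsid = peernet2id_alloc(dev_net(dev), peer_net);
 *
 *      if (nla_put_s32(skb, IFLA_LINK_NETNSID, nsid))
 *              goto nla_put_failure;
 */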
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
        bool alloc = false, alive = false;
        int id;

        if (refcount_read(&net->count) == 0)
                return NETNSA_NSID_NOT_ASSIGNED;
        spin_lock_bh(&net->nsid_lock);
        /*
         * When peer is obtained from RCU lists, we may race with
         * its cleanup. Check whether it's alive, and this guarantees
         * we never hash a peer back to net->netns_ids, after it has
         * just been idr_remove()'d from there in cleanup_net().
         */
        if (maybe_get_net(peer))
                alive = alloc = true;
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
                rtnl_net_notifyid(net, RTM_NEWNSID, id);
        if (alive)
                put_net(peer);
        return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
        int id;

        spin_lock_bh(&net->nsid_lock);
        id = __peernet2id(net, peer);
        spin_unlock_bh(&net->nsid_lock);
        return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
        return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
        struct net *peer;

        if (id < 0)
                return NULL;

        rcu_read_lock();
        peer = idr_find(&net->netns_ids, id);
        if (peer)
                peer = maybe_get_net(peer);
        rcu_read_unlock();

        return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
        /* Must be called with net_mutex held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
        spin_lock_init(&net->nsid_lock);

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
        rtnl_lock();
        list_add_tail_rcu(&net->list, &net_namespace_list);
        rtnl_unlock();
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}
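/*
 * A worked example of the unwind order above (illustrative only): with
 * pernet_list = [A, B, C] and C->init failing in setup_net(), the undo
 * path runs exits in reverse starting from the predecessor of the
 * failed entry, then frees in the same reverse order:
 *
 *      B->exit(net);  A->exit(net);    // ops_exit_list, reverse walk
 *      B freed;       A freed;         // ops_free_list, reverse walk
 *
 * C's partially-initialised state is released inside ops_init() itself
 * (its cleanup/out labels), so the reverse walks start at B.
 */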
static int __net_init net_defaults_init_net(struct net *net)
{
        net->core.sysctl_somaxconn = SOMAXCONN;
        return 0;
}

static struct pernet_operations net_defaults_ops = {
        .init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
        if (register_pernet_subsys(&net_defaults_ops))
                panic("Cannot initialize net default settings");

        return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
        return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
        kfree(rcu_access_pointer(net->gen));
        kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
        struct net *ns = p;
        if (ns && refcount_dec_and_test(&ns->passive))
                net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
                        struct user_namespace *user_ns, struct net *old_net)
{
        struct ucounts *ucounts;
        struct net *net;
        int rv;

        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);

        ucounts = inc_net_namespaces(user_ns);
        if (!ucounts)
                return ERR_PTR(-ENOSPC);

        net = net_alloc();
        if (!net) {
                dec_net_namespaces(ucounts);
                return ERR_PTR(-ENOMEM);
        }

        get_user_ns(user_ns);

        rv = mutex_lock_killable(&net_mutex);
        if (rv < 0) {
                net_free(net);
                dec_net_namespaces(ucounts);
                put_user_ns(user_ns);
                return ERR_PTR(rv);
        }

        net->ucounts = ucounts;
        rv = setup_net(net, user_ns);
        mutex_unlock(&net_mutex);
        if (rv < 0) {
                dec_net_namespaces(ucounts);
                put_user_ns(user_ns);
                net_drop_ns(net);
                return ERR_PTR(rv);
        }
        return net;
}
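/*
 * How we get here, for orientation: copy_net_ns() is reached from the
 * clone(2)/unshare(2) namespace plumbing (kernel/nsproxy.c) whenever
 * CLONE_NEWNET is requested. A minimal userspace trigger, sketched:
 *
 *      // from a process with CAP_SYS_ADMIN in its user namespace
 *      if (unshare(CLONE_NEWNET) == 0)
 *              ;       // the task now owns a freshly setup_net()'d netns
 *
 * Without CLONE_NEWNET in flags, the old namespace is simply reference
 * counted and shared, as the first test above shows.
 */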
static void unhash_nsid(struct net *net, struct net *last)
{
        struct net *tmp;
        /* This function is only called from cleanup_net() work,
         * and this work is the only process that may delete
         * a net from net_namespace_list. So, while the loop below
         * is executing, the list may only grow. Thus, we do not
         * use for_each_net_rcu() or rtnl_lock().
         */
        for_each_net(tmp) {
                int id;

                spin_lock_bh(&tmp->nsid_lock);
                id = __peernet2id(tmp, net);
                if (id >= 0)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
                        rtnl_net_notifyid(tmp, RTM_DELNSID, id);
                if (tmp == last)
                        break;
        }
        spin_lock_bh(&net->nsid_lock);
        idr_destroy(&net->netns_ids);
        spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp, *last;
        struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list)
                list_del_rcu(&net->list);
        /* Cache the last net. After we unlock rtnl, no net newly
         * added to net_namespace_list can assign an nsid pointing
         * to a net from net_kill_list (see peernet2id_alloc()).
         * So, we skip them in unhash_nsid().
         *
         * Note that unhash_nsid() does not delete nsid links
         * between net_kill_list's nets, as they've already been
         * deleted from net_namespace_list. But this would be
         * useless anyway, as their netns_ids are destroyed there.
         */
        last = list_last_entry(&net_namespace_list, struct net, list);
        rtnl_unlock();

        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                unhash_nsid(net, last);
                list_add_tail(&net->exit_list, &net_exit_list);
        }

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        mutex_unlock(&net_mutex);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                dec_net_namespaces(net->ucounts);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
        mutex_lock(&net_mutex);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
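/*
 * Reference lifecycle, sketched (illustrative; not a call site in this
 * file): code that stores a netns pointer pins it with get_net() and
 * releases it when done; the *last* put_net() lands in __put_net()
 * above, which defers the actual teardown to the "netns" workqueue:
 *
 *      struct net *net = get_net(sock_net(sk));
 *      ...use net...
 *      put_net(net);   // may queue cleanup_net() if this was the last ref
 *
 * Teardown cannot run in the caller's context: put_net() may be called
 * from contexts that cannot sleep (hence the irqsave locking above),
 * while cleanup_net() takes mutexes and blocks on RCU barriers.
 */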
struct net *get_net_ns_by_fd(int fd)
{
        struct file *file;
        struct ns_common *ns;
        struct net *net;

        file = proc_ns_fget(fd);
        if (IS_ERR(file))
                return ERR_CAST(file);

        ns = get_proc_ns(file_inode(file));
        if (ns->ops == &netns_operations)
                net = get_net(container_of(ns, struct net, ns));
        else
                net = ERR_PTR(-EINVAL);

        fput(file);
        return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
        return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                task_lock(tsk);
                nsproxy = tsk->nsproxy;
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
                task_unlock(tsk);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
        net->ns.ops = &netns_operations;
#endif
        return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
        ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
        .init = net_ns_net_init,
        .exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
        [NETNSA_NONE]   = { .type = NLA_UNSPEC },
        [NETNSA_NSID]   = { .type = NLA_S32 },
        [NETNSA_PID]    = { .type = NLA_U32 },
        [NETNSA_FD]     = { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct nlattr *nla;
        struct net *peer;
        int nsid, err;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
        if (!tb[NETNSA_NSID]) {
                NL_SET_ERR_MSG(extack, "nsid is missing");
                return -EINVAL;
        }
        nsid = nla_get_s32(tb[NETNSA_NSID]);

        if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
                nla = tb[NETNSA_PID];
        } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else {
                NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
        }
        if (IS_ERR(peer)) {
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
        }

        spin_lock_bh(&net->nsid_lock);
        if (__peernet2id(net, peer) >= 0) {
                spin_unlock_bh(&net->nsid_lock);
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack,
                               "Peer netns already has a nsid assigned");
                goto out;
        }

        err = alloc_netid(net, peer, nsid);
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
                NL_SET_ERR_MSG(extack, "The specified nsid is already used");
        }
out:
        put_net(peer);
        return err;
}
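/*
 * Userspace counterpart, for reference: iproute2 exercises this handler
 * when assigning an nsid to a named namespace, roughly:
 *
 *      ip netns add blue               # create the namespace
 *      ip netns set blue 42            # RTM_NEWNSID with NETNSA_NSID=42
 *
 * (The commands are an iproute2 convention, not something this file
 * depends on; any RTM_NEWNSID sender works the same way.)
 */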
static int rtnl_net_get_size(void)
{
        return NLMSG_ALIGN(sizeof(struct rtgenmsg))
               + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
               ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                         int cmd, struct net *net, int nsid)
{
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;

        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
        if (!nlh)
                return -EMSGSIZE;

        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;

        if (nla_put_s32(skb, NETNSA_NSID, nsid))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct nlattr *nla;
        struct sk_buff *msg;
        struct net *peer;
        int err, id;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
        if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
                nla = tb[NETNSA_PID];
        } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else {
                NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
        }

        if (IS_ERR(peer)) {
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
        }

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out;
        }

        id = peernet2id(net, peer);
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                            RTM_NEWNSID, net, id);
        if (err < 0)
                goto err_out;

        err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
        goto out;

err_out:
        nlmsg_free(msg);
out:
        put_net(peer);
        return err;
}

struct rtnl_net_dump_cb {
        struct net *net;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
        int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
        struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
        int ret;

        if (net_cb->idx < net_cb->s_idx)
                goto cont;

        ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
                            net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                            RTM_NEWNSID, net_cb->net, id);
        if (ret < 0)
                return ret;

cont:
        net_cb->idx++;
        return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct rtnl_net_dump_cb net_cb = {
                .net = net,
                .skb = skb,
                .cb = cb,
                .idx = 0,
                .s_idx = cb->args[0],
        };

        spin_lock_bh(&net->nsid_lock);
        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
        spin_unlock_bh(&net->nsid_lock);

        cb->args[0] = net_cb.idx;
        return skb->len;
}
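/*
 * Wire format of every message built by rtnl_net_fill() above, spelled
 * out (derived from rtnl_net_get_size(); lengths before NLMSG_ALIGN/
 * NLA_ALIGN padding):
 *
 *      struct nlmsghdr         nlmsg_type = RTM_NEWNSID (also for dumps)
 *      struct rtgenmsg         rtgen_family = AF_UNSPEC
 *      struct nlattr           nla_type = NETNSA_NSID, nla_len = 8
 *      s32                     the nsid, or NETNSA_NSID_NOT_ASSIGNED
 */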
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
        struct sk_buff *msg;
        int err = -ENOMEM;

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg)
                goto out;

        err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
        if (err < 0)
                goto err_out;

        rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
        return;

err_out:
        nlmsg_free(msg);
out:
        rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        mutex_lock(&net_mutex);
        if (setup_net(&init_net, &init_user_ns))
                panic("Could not setup the initial network namespace");

        init_net_initialized = true;
        mutex_unlock(&net_mutex);

        register_pernet_subsys(&net_ns_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
                      RTNL_FLAG_DOIT_UNLOCKED);
        rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
                      RTNL_FLAG_DOIT_UNLOCKED);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* On error, clean up all namespaces we initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_add_tail(&ops->list, list);
                return 0;
        }

        return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_del(&ops->list);
        } else {
                LIST_HEAD(net_exit_list);
                list_add(&init_net.exit_list, &net_exit_list);
                ops_exit_list(ops, &net_exit_list);
                ops_free_list(ops, &net_exit_list);
        }
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
again:
                error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID,
                                          ops->id);
                if (error < 0) {
                        if (error == -EAGAIN) {
                                ida_pre_get(&net_generic_ids, GFP_KERNEL);
                                goto again;
                        }
                        return error;
                }
                max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_remove(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 *      @ops: pernet operations structure for the subsystem
 *
 *      Register a subsystem which has init and exit functions
 *      that are called when network namespaces are created and
 *      destroyed respectively.
 *
 *      When registered all network namespace init functions are
 *      called for every existing network namespace, allowing kernel
 *      modules to have a race-free view of the set of network namespaces.
 *
 *      When a new network namespace is created all of the init
 *      methods are called in the order in which they were registered.
 *
 *      When a network namespace is destroyed all of the exit methods
 *      are called in the reverse of the order in which they were
 *      registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(first_device, ops);
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
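/*
 * Putting the pieces together: a minimal pernet subsystem using the
 * net_generic() mechanism. Illustrative sketch only; "foo_net_id",
 * "struct foo_net" and "foo_pernet_ops" are hypothetical names, not
 * kernel APIs:
 *
 *      static unsigned int foo_net_id;
 *
 *      struct foo_net {
 *              atomic_t events;
 *      };
 *
 *      static int __net_init foo_init_net(struct net *net)
 *      {
 *              // .size below made register_pernet_operations() allocate
 *              // and hook up a zeroed struct foo_net for this netns
 *              struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *              atomic_set(&fn->events, 0);
 *              return 0;
 *      }
 *
 *      static struct pernet_operations foo_pernet_ops = {
 *              .init = foo_init_net,
 *              .id   = &foo_net_id,
 *              .size = sizeof(struct foo_net),
 *      };
 *
 *      err = register_pernet_subsys(&foo_pernet_ops);
 *
 * No .exit is needed for the per-net blob itself: ops_free() above
 * kfrees it on namespace teardown whenever .id/.size are set.
 */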
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 *      @ops: pernet operations structure to manipulate
 *
 *      Remove the pernet operations structure from the list to be
 *      used when network namespaces are created or destroyed. In
 *      addition run the exit method for all existing network
 *      namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 *      @ops: pernet operations structure for the subsystem
 *
 *      Register a device which has init and exit functions
 *      that are called when network namespaces are created and
 *      destroyed respectively.
 *
 *      When registered all network namespace init functions are
 *      called for every existing network namespace, allowing kernel
 *      modules to have a race-free view of the set of network namespaces.
 *
 *      When a new network namespace is created all of the init
 *      methods are called in the order in which they were registered.
 *
 *      When a network namespace is destroyed all of the exit methods
 *      are called in the reverse of the order in which they were
 *      registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
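/*
 * Why two registration flavours: subsystems are inserted before
 * first_device and devices at the tail, so pernet_list always reads
 * [subsystems..., devices...]. Worked through (illustrative):
 *
 *      register_pernet_subsys(&S);     // list: S
 *      register_pernet_device(&D);     // list: S, D; first_device = &D
 *
 * On namespace creation setup_net() walks forward (S->init, D->init);
 * on teardown cleanup_net() walks in reverse (D->exit, S->exit), so a
 * device such as the loopback driver is torn down while the subsystems
 * it relies on are still alive.
 */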
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 *      @ops: pernet operations structure to manipulate
 *
 *      Remove the pernet operations structure from the list to be
 *      used when network namespaces are created or destroyed. In
 *      addition run the exit method for all existing network
 *      namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
        struct net *net = NULL;
        struct nsproxy *nsproxy;

        task_lock(task);
        nsproxy = task->nsproxy;
        if (nsproxy)
                net = get_net(nsproxy->net_ns);
        task_unlock(task);

        return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
        return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
        put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct net *net = to_net_ns(ns);

        if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        put_net(nsproxy->net_ns);
        nsproxy->net_ns = get_net(net);
        return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
        return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
        .name           = "net",
        .type           = CLONE_NEWNET,
        .get            = netns_get,
        .put            = netns_put,
        .install        = netns_install,
        .owner          = netns_owner,
};
#endif
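/*
 * Userspace view of netns_operations, sketched: netns_install() is what
 * ultimately runs when a task setns(2)-es into a net namespace via an
 * fd on /proc/<pid>/ns/net (the /var/run/netns/<name> paths are an
 * iproute2 convention; "blue" is a hypothetical name):
 *
 *      int fd = open("/var/run/netns/blue", O_RDONLY);
 *
 *      if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0) {
 *              // the calling task now runs inside netns "blue";
 *              // both capability checks in netns_install() passed
 *      }
 */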