1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 #include <linux/workqueue.h> 4 #include <linux/rtnetlink.h> 5 #include <linux/cache.h> 6 #include <linux/slab.h> 7 #include <linux/list.h> 8 #include <linux/delay.h> 9 #include <linux/sched.h> 10 #include <linux/idr.h> 11 #include <linux/rculist.h> 12 #include <linux/nsproxy.h> 13 #include <linux/fs.h> 14 #include <linux/proc_ns.h> 15 #include <linux/file.h> 16 #include <linux/export.h> 17 #include <linux/user_namespace.h> 18 #include <linux/net_namespace.h> 19 #include <linux/sched/task.h> 20 21 #include <net/sock.h> 22 #include <net/netlink.h> 23 #include <net/net_namespace.h> 24 #include <net/netns/generic.h> 25 26 /* 27 * Our network namespace constructor/destructor lists 28 */ 29 30 static LIST_HEAD(pernet_list); 31 static struct list_head *first_device = &pernet_list; 32 DEFINE_MUTEX(net_mutex); 33 34 LIST_HEAD(net_namespace_list); 35 EXPORT_SYMBOL_GPL(net_namespace_list); 36 37 struct net init_net = { 38 .count = REFCOUNT_INIT(1), 39 .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head), 40 }; 41 EXPORT_SYMBOL(init_net); 42 43 static bool init_net_initialized; 44 45 #define MIN_PERNET_OPS_ID \ 46 ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *)) 47 48 #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ 49 50 static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS; 51 52 static struct net_generic *net_alloc_generic(void) 53 { 54 struct net_generic *ng; 55 unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]); 56 57 ng = kzalloc(generic_size, GFP_KERNEL); 58 if (ng) 59 ng->s.len = max_gen_ptrs; 60 61 return ng; 62 } 63 64 static int net_assign_generic(struct net *net, unsigned int id, void *data) 65 { 66 struct net_generic *ng, *old_ng; 67 68 BUG_ON(!mutex_is_locked(&net_mutex)); 69 BUG_ON(id < MIN_PERNET_OPS_ID); 70 71 old_ng = rcu_dereference_protected(net->gen, 72 lockdep_is_held(&net_mutex)); 73 if (old_ng->s.len > id) { 74 old_ng->ptr[id] = data; 75 return 0; 76 } 77 78 ng = net_alloc_generic(); 79 if (ng == NULL) 80 return -ENOMEM; 81 82 /* 83 * Some synchronisation notes: 84 * 85 * The net_generic explores the net->gen array inside rcu 86 * read section. Besides once set the net->gen->ptr[x] 87 * pointer never changes (see rules in netns/generic.h). 88 * 89 * That said, we simply duplicate this array and schedule 90 * the old copy for kfree after a grace period. 91 */ 92 93 memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID], 94 (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *)); 95 ng->ptr[id] = data; 96 97 rcu_assign_pointer(net->gen, ng); 98 kfree_rcu(old_ng, s.rcu); 99 return 0; 100 } 101 102 static int ops_init(const struct pernet_operations *ops, struct net *net) 103 { 104 int err = -ENOMEM; 105 void *data = NULL; 106 107 if (ops->id && ops->size) { 108 data = kzalloc(ops->size, GFP_KERNEL); 109 if (!data) 110 goto out; 111 112 err = net_assign_generic(net, *ops->id, data); 113 if (err) 114 goto cleanup; 115 } 116 err = 0; 117 if (ops->init) 118 err = ops->init(net); 119 if (!err) 120 return 0; 121 122 cleanup: 123 kfree(data); 124 125 out: 126 return err; 127 } 128 129 static void ops_free(const struct pernet_operations *ops, struct net *net) 130 { 131 if (ops->id && ops->size) { 132 kfree(net_generic(net, *ops->id)); 133 } 134 } 135 136 static void ops_exit_list(const struct pernet_operations *ops, 137 struct list_head *net_exit_list) 138 { 139 struct net *net; 140 if (ops->exit) { 141 list_for_each_entry(net, net_exit_list, exit_list) 142 ops->exit(net); 143 } 144 if (ops->exit_batch) 145 ops->exit_batch(net_exit_list); 146 } 147 148 static void ops_free_list(const struct pernet_operations *ops, 149 struct list_head *net_exit_list) 150 { 151 struct net *net; 152 if (ops->size && ops->id) { 153 list_for_each_entry(net, net_exit_list, exit_list) 154 ops_free(ops, net); 155 } 156 } 157 158 /* should be called with nsid_lock held */ 159 static int alloc_netid(struct net *net, struct net *peer, int reqid) 160 { 161 int min = 0, max = 0; 162 163 if (reqid >= 0) { 164 min = reqid; 165 max = reqid + 1; 166 } 167 168 return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC); 169 } 170 171 /* This function is used by idr_for_each(). If net is equal to peer, the 172 * function returns the id so that idr_for_each() stops. Because we cannot 173 * returns the id 0 (idr_for_each() will not stop), we return the magic value 174 * NET_ID_ZERO (-1) for it. 175 */ 176 #define NET_ID_ZERO -1 177 static int net_eq_idr(int id, void *net, void *peer) 178 { 179 if (net_eq(net, peer)) 180 return id ? : NET_ID_ZERO; 181 return 0; 182 } 183 184 /* Should be called with nsid_lock held. If a new id is assigned, the bool alloc 185 * is set to true, thus the caller knows that the new id must be notified via 186 * rtnl. 187 */ 188 static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) 189 { 190 int id = idr_for_each(&net->netns_ids, net_eq_idr, peer); 191 bool alloc_it = *alloc; 192 193 *alloc = false; 194 195 /* Magic value for id 0. */ 196 if (id == NET_ID_ZERO) 197 return 0; 198 if (id > 0) 199 return id; 200 201 if (alloc_it) { 202 id = alloc_netid(net, peer, -1); 203 *alloc = true; 204 return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; 205 } 206 207 return NETNSA_NSID_NOT_ASSIGNED; 208 } 209 210 /* should be called with nsid_lock held */ 211 static int __peernet2id(struct net *net, struct net *peer) 212 { 213 bool no = false; 214 215 return __peernet2id_alloc(net, peer, &no); 216 } 217 218 static void rtnl_net_notifyid(struct net *net, int cmd, int id); 219 /* This function returns the id of a peer netns. If no id is assigned, one will 220 * be allocated and returned. 221 */ 222 int peernet2id_alloc(struct net *net, struct net *peer) 223 { 224 bool alloc = false, alive = false; 225 int id; 226 227 if (refcount_read(&net->count) == 0) 228 return NETNSA_NSID_NOT_ASSIGNED; 229 spin_lock_bh(&net->nsid_lock); 230 /* 231 * When peer is obtained from RCU lists, we may race with 232 * its cleanup. Check whether it's alive, and this guarantees 233 * we never hash a peer back to net->netns_ids, after it has 234 * just been idr_remove()'d from there in cleanup_net(). 235 */ 236 if (maybe_get_net(peer)) 237 alive = alloc = true; 238 id = __peernet2id_alloc(net, peer, &alloc); 239 spin_unlock_bh(&net->nsid_lock); 240 if (alloc && id >= 0) 241 rtnl_net_notifyid(net, RTM_NEWNSID, id); 242 if (alive) 243 put_net(peer); 244 return id; 245 } 246 EXPORT_SYMBOL_GPL(peernet2id_alloc); 247 248 /* This function returns, if assigned, the id of a peer netns. */ 249 int peernet2id(struct net *net, struct net *peer) 250 { 251 int id; 252 253 spin_lock_bh(&net->nsid_lock); 254 id = __peernet2id(net, peer); 255 spin_unlock_bh(&net->nsid_lock); 256 return id; 257 } 258 EXPORT_SYMBOL(peernet2id); 259 260 /* This function returns true is the peer netns has an id assigned into the 261 * current netns. 262 */ 263 bool peernet_has_id(struct net *net, struct net *peer) 264 { 265 return peernet2id(net, peer) >= 0; 266 } 267 268 struct net *get_net_ns_by_id(struct net *net, int id) 269 { 270 struct net *peer; 271 272 if (id < 0) 273 return NULL; 274 275 rcu_read_lock(); 276 peer = idr_find(&net->netns_ids, id); 277 if (peer) 278 peer = maybe_get_net(peer); 279 rcu_read_unlock(); 280 281 return peer; 282 } 283 284 /* 285 * setup_net runs the initializers for the network namespace object. 286 */ 287 static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) 288 { 289 /* Must be called with net_mutex held */ 290 const struct pernet_operations *ops, *saved_ops; 291 int error = 0; 292 LIST_HEAD(net_exit_list); 293 294 refcount_set(&net->count, 1); 295 refcount_set(&net->passive, 1); 296 net->dev_base_seq = 1; 297 net->user_ns = user_ns; 298 idr_init(&net->netns_ids); 299 spin_lock_init(&net->nsid_lock); 300 301 list_for_each_entry(ops, &pernet_list, list) { 302 error = ops_init(ops, net); 303 if (error < 0) 304 goto out_undo; 305 } 306 out: 307 return error; 308 309 out_undo: 310 /* Walk through the list backwards calling the exit functions 311 * for the pernet modules whose init functions did not fail. 312 */ 313 list_add(&net->exit_list, &net_exit_list); 314 saved_ops = ops; 315 list_for_each_entry_continue_reverse(ops, &pernet_list, list) 316 ops_exit_list(ops, &net_exit_list); 317 318 ops = saved_ops; 319 list_for_each_entry_continue_reverse(ops, &pernet_list, list) 320 ops_free_list(ops, &net_exit_list); 321 322 rcu_barrier(); 323 goto out; 324 } 325 326 static int __net_init net_defaults_init_net(struct net *net) 327 { 328 net->core.sysctl_somaxconn = SOMAXCONN; 329 return 0; 330 } 331 332 static struct pernet_operations net_defaults_ops = { 333 .init = net_defaults_init_net, 334 }; 335 336 static __init int net_defaults_init(void) 337 { 338 if (register_pernet_subsys(&net_defaults_ops)) 339 panic("Cannot initialize net default settings"); 340 341 return 0; 342 } 343 344 core_initcall(net_defaults_init); 345 346 #ifdef CONFIG_NET_NS 347 static struct ucounts *inc_net_namespaces(struct user_namespace *ns) 348 { 349 return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES); 350 } 351 352 static void dec_net_namespaces(struct ucounts *ucounts) 353 { 354 dec_ucount(ucounts, UCOUNT_NET_NAMESPACES); 355 } 356 357 static struct kmem_cache *net_cachep; 358 static struct workqueue_struct *netns_wq; 359 360 static struct net *net_alloc(void) 361 { 362 struct net *net = NULL; 363 struct net_generic *ng; 364 365 ng = net_alloc_generic(); 366 if (!ng) 367 goto out; 368 369 net = kmem_cache_zalloc(net_cachep, GFP_KERNEL); 370 if (!net) 371 goto out_free; 372 373 rcu_assign_pointer(net->gen, ng); 374 out: 375 return net; 376 377 out_free: 378 kfree(ng); 379 goto out; 380 } 381 382 static void net_free(struct net *net) 383 { 384 kfree(rcu_access_pointer(net->gen)); 385 kmem_cache_free(net_cachep, net); 386 } 387 388 void net_drop_ns(void *p) 389 { 390 struct net *ns = p; 391 if (ns && refcount_dec_and_test(&ns->passive)) 392 net_free(ns); 393 } 394 395 struct net *copy_net_ns(unsigned long flags, 396 struct user_namespace *user_ns, struct net *old_net) 397 { 398 struct ucounts *ucounts; 399 struct net *net; 400 int rv; 401 402 if (!(flags & CLONE_NEWNET)) 403 return get_net(old_net); 404 405 ucounts = inc_net_namespaces(user_ns); 406 if (!ucounts) 407 return ERR_PTR(-ENOSPC); 408 409 net = net_alloc(); 410 if (!net) { 411 dec_net_namespaces(ucounts); 412 return ERR_PTR(-ENOMEM); 413 } 414 415 get_user_ns(user_ns); 416 417 rv = mutex_lock_killable(&net_mutex); 418 if (rv < 0) { 419 net_free(net); 420 dec_net_namespaces(ucounts); 421 put_user_ns(user_ns); 422 return ERR_PTR(rv); 423 } 424 425 net->ucounts = ucounts; 426 rv = setup_net(net, user_ns); 427 if (rv == 0) { 428 rtnl_lock(); 429 list_add_tail_rcu(&net->list, &net_namespace_list); 430 rtnl_unlock(); 431 } 432 mutex_unlock(&net_mutex); 433 if (rv < 0) { 434 dec_net_namespaces(ucounts); 435 put_user_ns(user_ns); 436 net_drop_ns(net); 437 return ERR_PTR(rv); 438 } 439 return net; 440 } 441 442 static void unhash_nsid(struct net *net, struct net *last) 443 { 444 struct net *tmp; 445 /* This function is only called from cleanup_net() work, 446 * and this work is the only process, that may delete 447 * a net from net_namespace_list. So, when the below 448 * is executing, the list may only grow. Thus, we do not 449 * use for_each_net_rcu() or rtnl_lock(). 450 */ 451 for_each_net(tmp) { 452 int id; 453 454 spin_lock_bh(&tmp->nsid_lock); 455 id = __peernet2id(tmp, net); 456 if (id >= 0) 457 idr_remove(&tmp->netns_ids, id); 458 spin_unlock_bh(&tmp->nsid_lock); 459 if (id >= 0) 460 rtnl_net_notifyid(tmp, RTM_DELNSID, id); 461 if (tmp == last) 462 break; 463 } 464 spin_lock_bh(&net->nsid_lock); 465 idr_destroy(&net->netns_ids); 466 spin_unlock_bh(&net->nsid_lock); 467 } 468 469 static DEFINE_SPINLOCK(cleanup_list_lock); 470 static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ 471 472 static void cleanup_net(struct work_struct *work) 473 { 474 const struct pernet_operations *ops; 475 struct net *net, *tmp, *last; 476 struct list_head net_kill_list; 477 LIST_HEAD(net_exit_list); 478 479 /* Atomically snapshot the list of namespaces to cleanup */ 480 spin_lock_irq(&cleanup_list_lock); 481 list_replace_init(&cleanup_list, &net_kill_list); 482 spin_unlock_irq(&cleanup_list_lock); 483 484 mutex_lock(&net_mutex); 485 486 /* Don't let anyone else find us. */ 487 rtnl_lock(); 488 list_for_each_entry(net, &net_kill_list, cleanup_list) 489 list_del_rcu(&net->list); 490 /* Cache last net. After we unlock rtnl, no one new net 491 * added to net_namespace_list can assign nsid pointer 492 * to a net from net_kill_list (see peernet2id_alloc()). 493 * So, we skip them in unhash_nsid(). 494 * 495 * Note, that unhash_nsid() does not delete nsid links 496 * between net_kill_list's nets, as they've already 497 * deleted from net_namespace_list. But, this would be 498 * useless anyway, as netns_ids are destroyed there. 499 */ 500 last = list_last_entry(&net_namespace_list, struct net, list); 501 rtnl_unlock(); 502 503 list_for_each_entry(net, &net_kill_list, cleanup_list) { 504 unhash_nsid(net, last); 505 list_add_tail(&net->exit_list, &net_exit_list); 506 } 507 508 /* 509 * Another CPU might be rcu-iterating the list, wait for it. 510 * This needs to be before calling the exit() notifiers, so 511 * the rcu_barrier() below isn't sufficient alone. 512 */ 513 synchronize_rcu(); 514 515 /* Run all of the network namespace exit methods */ 516 list_for_each_entry_reverse(ops, &pernet_list, list) 517 ops_exit_list(ops, &net_exit_list); 518 519 /* Free the net generic variables */ 520 list_for_each_entry_reverse(ops, &pernet_list, list) 521 ops_free_list(ops, &net_exit_list); 522 523 mutex_unlock(&net_mutex); 524 525 /* Ensure there are no outstanding rcu callbacks using this 526 * network namespace. 527 */ 528 rcu_barrier(); 529 530 /* Finally it is safe to free my network namespace structure */ 531 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 532 list_del_init(&net->exit_list); 533 dec_net_namespaces(net->ucounts); 534 put_user_ns(net->user_ns); 535 net_drop_ns(net); 536 } 537 } 538 539 /** 540 * net_ns_barrier - wait until concurrent net_cleanup_work is done 541 * 542 * cleanup_net runs from work queue and will first remove namespaces 543 * from the global list, then run net exit functions. 544 * 545 * Call this in module exit path to make sure that all netns 546 * ->exit ops have been invoked before the function is removed. 547 */ 548 void net_ns_barrier(void) 549 { 550 mutex_lock(&net_mutex); 551 mutex_unlock(&net_mutex); 552 } 553 EXPORT_SYMBOL(net_ns_barrier); 554 555 static DECLARE_WORK(net_cleanup_work, cleanup_net); 556 557 void __put_net(struct net *net) 558 { 559 /* Cleanup the network namespace in process context */ 560 unsigned long flags; 561 562 spin_lock_irqsave(&cleanup_list_lock, flags); 563 list_add(&net->cleanup_list, &cleanup_list); 564 spin_unlock_irqrestore(&cleanup_list_lock, flags); 565 566 queue_work(netns_wq, &net_cleanup_work); 567 } 568 EXPORT_SYMBOL_GPL(__put_net); 569 570 struct net *get_net_ns_by_fd(int fd) 571 { 572 struct file *file; 573 struct ns_common *ns; 574 struct net *net; 575 576 file = proc_ns_fget(fd); 577 if (IS_ERR(file)) 578 return ERR_CAST(file); 579 580 ns = get_proc_ns(file_inode(file)); 581 if (ns->ops == &netns_operations) 582 net = get_net(container_of(ns, struct net, ns)); 583 else 584 net = ERR_PTR(-EINVAL); 585 586 fput(file); 587 return net; 588 } 589 590 #else 591 struct net *get_net_ns_by_fd(int fd) 592 { 593 return ERR_PTR(-EINVAL); 594 } 595 #endif 596 EXPORT_SYMBOL_GPL(get_net_ns_by_fd); 597 598 struct net *get_net_ns_by_pid(pid_t pid) 599 { 600 struct task_struct *tsk; 601 struct net *net; 602 603 /* Lookup the network namespace */ 604 net = ERR_PTR(-ESRCH); 605 rcu_read_lock(); 606 tsk = find_task_by_vpid(pid); 607 if (tsk) { 608 struct nsproxy *nsproxy; 609 task_lock(tsk); 610 nsproxy = tsk->nsproxy; 611 if (nsproxy) 612 net = get_net(nsproxy->net_ns); 613 task_unlock(tsk); 614 } 615 rcu_read_unlock(); 616 return net; 617 } 618 EXPORT_SYMBOL_GPL(get_net_ns_by_pid); 619 620 static __net_init int net_ns_net_init(struct net *net) 621 { 622 #ifdef CONFIG_NET_NS 623 net->ns.ops = &netns_operations; 624 #endif 625 return ns_alloc_inum(&net->ns); 626 } 627 628 static __net_exit void net_ns_net_exit(struct net *net) 629 { 630 ns_free_inum(&net->ns); 631 } 632 633 static struct pernet_operations __net_initdata net_ns_ops = { 634 .init = net_ns_net_init, 635 .exit = net_ns_net_exit, 636 }; 637 638 static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = { 639 [NETNSA_NONE] = { .type = NLA_UNSPEC }, 640 [NETNSA_NSID] = { .type = NLA_S32 }, 641 [NETNSA_PID] = { .type = NLA_U32 }, 642 [NETNSA_FD] = { .type = NLA_U32 }, 643 }; 644 645 static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, 646 struct netlink_ext_ack *extack) 647 { 648 struct net *net = sock_net(skb->sk); 649 struct nlattr *tb[NETNSA_MAX + 1]; 650 struct nlattr *nla; 651 struct net *peer; 652 int nsid, err; 653 654 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, 655 rtnl_net_policy, extack); 656 if (err < 0) 657 return err; 658 if (!tb[NETNSA_NSID]) { 659 NL_SET_ERR_MSG(extack, "nsid is missing"); 660 return -EINVAL; 661 } 662 nsid = nla_get_s32(tb[NETNSA_NSID]); 663 664 if (tb[NETNSA_PID]) { 665 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); 666 nla = tb[NETNSA_PID]; 667 } else if (tb[NETNSA_FD]) { 668 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); 669 nla = tb[NETNSA_FD]; 670 } else { 671 NL_SET_ERR_MSG(extack, "Peer netns reference is missing"); 672 return -EINVAL; 673 } 674 if (IS_ERR(peer)) { 675 NL_SET_BAD_ATTR(extack, nla); 676 NL_SET_ERR_MSG(extack, "Peer netns reference is invalid"); 677 return PTR_ERR(peer); 678 } 679 680 spin_lock_bh(&net->nsid_lock); 681 if (__peernet2id(net, peer) >= 0) { 682 spin_unlock_bh(&net->nsid_lock); 683 err = -EEXIST; 684 NL_SET_BAD_ATTR(extack, nla); 685 NL_SET_ERR_MSG(extack, 686 "Peer netns already has a nsid assigned"); 687 goto out; 688 } 689 690 err = alloc_netid(net, peer, nsid); 691 spin_unlock_bh(&net->nsid_lock); 692 if (err >= 0) { 693 rtnl_net_notifyid(net, RTM_NEWNSID, err); 694 err = 0; 695 } else if (err == -ENOSPC && nsid >= 0) { 696 err = -EEXIST; 697 NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]); 698 NL_SET_ERR_MSG(extack, "The specified nsid is already used"); 699 } 700 out: 701 put_net(peer); 702 return err; 703 } 704 705 static int rtnl_net_get_size(void) 706 { 707 return NLMSG_ALIGN(sizeof(struct rtgenmsg)) 708 + nla_total_size(sizeof(s32)) /* NETNSA_NSID */ 709 ; 710 } 711 712 static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, 713 int cmd, struct net *net, int nsid) 714 { 715 struct nlmsghdr *nlh; 716 struct rtgenmsg *rth; 717 718 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags); 719 if (!nlh) 720 return -EMSGSIZE; 721 722 rth = nlmsg_data(nlh); 723 rth->rtgen_family = AF_UNSPEC; 724 725 if (nla_put_s32(skb, NETNSA_NSID, nsid)) 726 goto nla_put_failure; 727 728 nlmsg_end(skb, nlh); 729 return 0; 730 731 nla_put_failure: 732 nlmsg_cancel(skb, nlh); 733 return -EMSGSIZE; 734 } 735 736 static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh, 737 struct netlink_ext_ack *extack) 738 { 739 struct net *net = sock_net(skb->sk); 740 struct nlattr *tb[NETNSA_MAX + 1]; 741 struct nlattr *nla; 742 struct sk_buff *msg; 743 struct net *peer; 744 int err, id; 745 746 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, 747 rtnl_net_policy, extack); 748 if (err < 0) 749 return err; 750 if (tb[NETNSA_PID]) { 751 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); 752 nla = tb[NETNSA_PID]; 753 } else if (tb[NETNSA_FD]) { 754 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); 755 nla = tb[NETNSA_FD]; 756 } else { 757 NL_SET_ERR_MSG(extack, "Peer netns reference is missing"); 758 return -EINVAL; 759 } 760 761 if (IS_ERR(peer)) { 762 NL_SET_BAD_ATTR(extack, nla); 763 NL_SET_ERR_MSG(extack, "Peer netns reference is invalid"); 764 return PTR_ERR(peer); 765 } 766 767 msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); 768 if (!msg) { 769 err = -ENOMEM; 770 goto out; 771 } 772 773 id = peernet2id(net, peer); 774 err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 775 RTM_NEWNSID, net, id); 776 if (err < 0) 777 goto err_out; 778 779 err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid); 780 goto out; 781 782 err_out: 783 nlmsg_free(msg); 784 out: 785 put_net(peer); 786 return err; 787 } 788 789 struct rtnl_net_dump_cb { 790 struct net *net; 791 struct sk_buff *skb; 792 struct netlink_callback *cb; 793 int idx; 794 int s_idx; 795 }; 796 797 static int rtnl_net_dumpid_one(int id, void *peer, void *data) 798 { 799 struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data; 800 int ret; 801 802 if (net_cb->idx < net_cb->s_idx) 803 goto cont; 804 805 ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid, 806 net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI, 807 RTM_NEWNSID, net_cb->net, id); 808 if (ret < 0) 809 return ret; 810 811 cont: 812 net_cb->idx++; 813 return 0; 814 } 815 816 static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) 817 { 818 struct net *net = sock_net(skb->sk); 819 struct rtnl_net_dump_cb net_cb = { 820 .net = net, 821 .skb = skb, 822 .cb = cb, 823 .idx = 0, 824 .s_idx = cb->args[0], 825 }; 826 827 spin_lock_bh(&net->nsid_lock); 828 idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); 829 spin_unlock_bh(&net->nsid_lock); 830 831 cb->args[0] = net_cb.idx; 832 return skb->len; 833 } 834 835 static void rtnl_net_notifyid(struct net *net, int cmd, int id) 836 { 837 struct sk_buff *msg; 838 int err = -ENOMEM; 839 840 msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); 841 if (!msg) 842 goto out; 843 844 err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id); 845 if (err < 0) 846 goto err_out; 847 848 rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0); 849 return; 850 851 err_out: 852 nlmsg_free(msg); 853 out: 854 rtnl_set_sk_err(net, RTNLGRP_NSID, err); 855 } 856 857 static int __init net_ns_init(void) 858 { 859 struct net_generic *ng; 860 861 #ifdef CONFIG_NET_NS 862 net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), 863 SMP_CACHE_BYTES, 864 SLAB_PANIC, NULL); 865 866 /* Create workqueue for cleanup */ 867 netns_wq = create_singlethread_workqueue("netns"); 868 if (!netns_wq) 869 panic("Could not create netns workq"); 870 #endif 871 872 ng = net_alloc_generic(); 873 if (!ng) 874 panic("Could not allocate generic netns"); 875 876 rcu_assign_pointer(init_net.gen, ng); 877 878 mutex_lock(&net_mutex); 879 if (setup_net(&init_net, &init_user_ns)) 880 panic("Could not setup the initial network namespace"); 881 882 init_net_initialized = true; 883 884 rtnl_lock(); 885 list_add_tail_rcu(&init_net.list, &net_namespace_list); 886 rtnl_unlock(); 887 888 mutex_unlock(&net_mutex); 889 890 register_pernet_subsys(&net_ns_ops); 891 892 rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, 893 RTNL_FLAG_DOIT_UNLOCKED); 894 rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid, 895 RTNL_FLAG_DOIT_UNLOCKED); 896 897 return 0; 898 } 899 900 pure_initcall(net_ns_init); 901 902 #ifdef CONFIG_NET_NS 903 static int __register_pernet_operations(struct list_head *list, 904 struct pernet_operations *ops) 905 { 906 struct net *net; 907 int error; 908 LIST_HEAD(net_exit_list); 909 910 list_add_tail(&ops->list, list); 911 if (ops->init || (ops->id && ops->size)) { 912 for_each_net(net) { 913 error = ops_init(ops, net); 914 if (error) 915 goto out_undo; 916 list_add_tail(&net->exit_list, &net_exit_list); 917 } 918 } 919 return 0; 920 921 out_undo: 922 /* If I have an error cleanup all namespaces I initialized */ 923 list_del(&ops->list); 924 ops_exit_list(ops, &net_exit_list); 925 ops_free_list(ops, &net_exit_list); 926 return error; 927 } 928 929 static void __unregister_pernet_operations(struct pernet_operations *ops) 930 { 931 struct net *net; 932 LIST_HEAD(net_exit_list); 933 934 list_del(&ops->list); 935 for_each_net(net) 936 list_add_tail(&net->exit_list, &net_exit_list); 937 ops_exit_list(ops, &net_exit_list); 938 ops_free_list(ops, &net_exit_list); 939 } 940 941 #else 942 943 static int __register_pernet_operations(struct list_head *list, 944 struct pernet_operations *ops) 945 { 946 if (!init_net_initialized) { 947 list_add_tail(&ops->list, list); 948 return 0; 949 } 950 951 return ops_init(ops, &init_net); 952 } 953 954 static void __unregister_pernet_operations(struct pernet_operations *ops) 955 { 956 if (!init_net_initialized) { 957 list_del(&ops->list); 958 } else { 959 LIST_HEAD(net_exit_list); 960 list_add(&init_net.exit_list, &net_exit_list); 961 ops_exit_list(ops, &net_exit_list); 962 ops_free_list(ops, &net_exit_list); 963 } 964 } 965 966 #endif /* CONFIG_NET_NS */ 967 968 static DEFINE_IDA(net_generic_ids); 969 970 static int register_pernet_operations(struct list_head *list, 971 struct pernet_operations *ops) 972 { 973 int error; 974 975 if (ops->id) { 976 again: 977 error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id); 978 if (error < 0) { 979 if (error == -EAGAIN) { 980 ida_pre_get(&net_generic_ids, GFP_KERNEL); 981 goto again; 982 } 983 return error; 984 } 985 max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1); 986 } 987 error = __register_pernet_operations(list, ops); 988 if (error) { 989 rcu_barrier(); 990 if (ops->id) 991 ida_remove(&net_generic_ids, *ops->id); 992 } 993 994 return error; 995 } 996 997 static void unregister_pernet_operations(struct pernet_operations *ops) 998 { 999 1000 __unregister_pernet_operations(ops); 1001 rcu_barrier(); 1002 if (ops->id) 1003 ida_remove(&net_generic_ids, *ops->id); 1004 } 1005 1006 /** 1007 * register_pernet_subsys - register a network namespace subsystem 1008 * @ops: pernet operations structure for the subsystem 1009 * 1010 * Register a subsystem which has init and exit functions 1011 * that are called when network namespaces are created and 1012 * destroyed respectively. 1013 * 1014 * When registered all network namespace init functions are 1015 * called for every existing network namespace. Allowing kernel 1016 * modules to have a race free view of the set of network namespaces. 1017 * 1018 * When a new network namespace is created all of the init 1019 * methods are called in the order in which they were registered. 1020 * 1021 * When a network namespace is destroyed all of the exit methods 1022 * are called in the reverse of the order with which they were 1023 * registered. 1024 */ 1025 int register_pernet_subsys(struct pernet_operations *ops) 1026 { 1027 int error; 1028 mutex_lock(&net_mutex); 1029 error = register_pernet_operations(first_device, ops); 1030 mutex_unlock(&net_mutex); 1031 return error; 1032 } 1033 EXPORT_SYMBOL_GPL(register_pernet_subsys); 1034 1035 /** 1036 * unregister_pernet_subsys - unregister a network namespace subsystem 1037 * @ops: pernet operations structure to manipulate 1038 * 1039 * Remove the pernet operations structure from the list to be 1040 * used when network namespaces are created or destroyed. In 1041 * addition run the exit method for all existing network 1042 * namespaces. 1043 */ 1044 void unregister_pernet_subsys(struct pernet_operations *ops) 1045 { 1046 mutex_lock(&net_mutex); 1047 unregister_pernet_operations(ops); 1048 mutex_unlock(&net_mutex); 1049 } 1050 EXPORT_SYMBOL_GPL(unregister_pernet_subsys); 1051 1052 /** 1053 * register_pernet_device - register a network namespace device 1054 * @ops: pernet operations structure for the subsystem 1055 * 1056 * Register a device which has init and exit functions 1057 * that are called when network namespaces are created and 1058 * destroyed respectively. 1059 * 1060 * When registered all network namespace init functions are 1061 * called for every existing network namespace. Allowing kernel 1062 * modules to have a race free view of the set of network namespaces. 1063 * 1064 * When a new network namespace is created all of the init 1065 * methods are called in the order in which they were registered. 1066 * 1067 * When a network namespace is destroyed all of the exit methods 1068 * are called in the reverse of the order with which they were 1069 * registered. 1070 */ 1071 int register_pernet_device(struct pernet_operations *ops) 1072 { 1073 int error; 1074 mutex_lock(&net_mutex); 1075 error = register_pernet_operations(&pernet_list, ops); 1076 if (!error && (first_device == &pernet_list)) 1077 first_device = &ops->list; 1078 mutex_unlock(&net_mutex); 1079 return error; 1080 } 1081 EXPORT_SYMBOL_GPL(register_pernet_device); 1082 1083 /** 1084 * unregister_pernet_device - unregister a network namespace netdevice 1085 * @ops: pernet operations structure to manipulate 1086 * 1087 * Remove the pernet operations structure from the list to be 1088 * used when network namespaces are created or destroyed. In 1089 * addition run the exit method for all existing network 1090 * namespaces. 1091 */ 1092 void unregister_pernet_device(struct pernet_operations *ops) 1093 { 1094 mutex_lock(&net_mutex); 1095 if (&ops->list == first_device) 1096 first_device = first_device->next; 1097 unregister_pernet_operations(ops); 1098 mutex_unlock(&net_mutex); 1099 } 1100 EXPORT_SYMBOL_GPL(unregister_pernet_device); 1101 1102 #ifdef CONFIG_NET_NS 1103 static struct ns_common *netns_get(struct task_struct *task) 1104 { 1105 struct net *net = NULL; 1106 struct nsproxy *nsproxy; 1107 1108 task_lock(task); 1109 nsproxy = task->nsproxy; 1110 if (nsproxy) 1111 net = get_net(nsproxy->net_ns); 1112 task_unlock(task); 1113 1114 return net ? &net->ns : NULL; 1115 } 1116 1117 static inline struct net *to_net_ns(struct ns_common *ns) 1118 { 1119 return container_of(ns, struct net, ns); 1120 } 1121 1122 static void netns_put(struct ns_common *ns) 1123 { 1124 put_net(to_net_ns(ns)); 1125 } 1126 1127 static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns) 1128 { 1129 struct net *net = to_net_ns(ns); 1130 1131 if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) || 1132 !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) 1133 return -EPERM; 1134 1135 put_net(nsproxy->net_ns); 1136 nsproxy->net_ns = get_net(net); 1137 return 0; 1138 } 1139 1140 static struct user_namespace *netns_owner(struct ns_common *ns) 1141 { 1142 return to_net_ns(ns)->user_ns; 1143 } 1144 1145 const struct proc_ns_operations netns_operations = { 1146 .name = "net", 1147 .type = CLONE_NEWNET, 1148 .get = netns_get, 1149 .put = netns_put, 1150 .install = netns_install, 1151 .owner = netns_owner, 1152 }; 1153 #endif 1154