// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
#ifdef CONFIG_KEYS
	.key_domain	= &init_net_key_domain,
#endif
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please do not use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}
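/*
 * Worked example (a sketch, assuming a typical 64-bit build): the len
 * word and the rcu_head in struct net_generic together occupy three
 * pointer-sized slots, so MIN_PERNET_OPS_ID evaluates to 3. Of the
 * INITIAL_NET_GEN_PTRS (13) initial slots, slots [0..2] overlay that
 * header ("+1 for len +2 for rcu_head" above), leaving ids 3..12 usable
 * before net_assign_generic() below must grow the array.
 */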
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh);
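/*
 * Encoding round-trip, for illustration: a peer stored under id 0 makes
 * net_eq_idr() above return NET_ID_ZERO (-1) so idr_for_each() stops, and
 * __peernet2id_alloc() maps that back to 0. A peer that is not in the
 * idr at all leaves idr_for_each() returning 0, which falls through to
 * allocation (if requested) or NETNSA_NSID_NOT_ASSIGNED.
 */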
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
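/*
 * Typical caller pattern (a sketch, not code from this file):
 *
 *	struct net *peer = get_net_ns_by_id(net, id);
 *
 *	if (!peer)
 *		return -ENOENT;
 *	// ... use peer ...
 *	put_net(peer);
 *
 * maybe_get_net() refuses to take a reference once net->count has hit
 * zero, which is what lets this lookup race safely against a concurrent
 * cleanup_net() on the same peer.
 */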
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	synchronize_rcu();

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net, user_ns);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
		key_remove_domain(net->key_domain);
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}
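/*
 * For orientation (a sketch of the call path, not code in this file):
 * clone(2) and unshare(2) with CLONE_NEWNET reach copy_net_ns() via
 * copy_namespaces() -> create_new_namespaces(); without CLONE_NEWNET the
 * old namespace is simply reference-counted through get_net(old_net).
 */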
/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;

	/* This function is only called from the cleanup_net() work,
	 * and that work is the only process that may delete a net
	 * from net_namespace_list. So, while the code below is
	 * executing, the list may only grow. Thus, we do not need
	 * for_each_net_rcu() or net_rwsem here.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);
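/*
 * Teardown flow in brief: the final put_net() lands in __put_net()
 * below, which pushes the net onto cleanup_list and queues
 * net_cleanup_work, so all of cleanup_net() runs from the "netns"
 * workqueue in process context, where it may sleep in synchronize_rcu()
 * and in the pernet exit methods.
 */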
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock net_rwsem, no new net
	 * added to net_namespace_list can assign an nsid pointer
	 * to a net from net_kill_list (see peernet2id_alloc()),
	 * so we skip them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they have already been
	 * deleted from net_namespace_list. But this would be
	 * useless anyway, as their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/* Run all of the network namespace pre_exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		key_remove_domain(net->key_domain);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};
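/*
 * Userspace sketch (hypothetical values, assuming iproute2 behaviour):
 * "ip netns set vpn 42" sends RTM_NEWNSID with NETNSA_FD referring to
 * the "vpn" namespace and NETNSA_NSID = 42. rtnl_net_newid() below
 * resolves the peer from the pid/fd attribute and assigns the requested
 * id, failing with -EEXIST if the peer already has one.
 */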
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}
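/*
 * A GETNSID request names the peer by exactly one of NETNSA_PID,
 * NETNSA_FD or NETNSA_NSID, and may redirect the lookup with
 * NETNSA_TARGET_NSID, i.e. "which id does the netns known here as
 * TARGET_NSID assign to this peer?"; NETNSA_CURRENT_NSID then reports
 * the id in the requester's own netns for reference.
 */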
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}
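/*
 * Dump resume, for illustration: netlink dumps are restartable, so
 * cb->args[0] carries the number of idr entries already sent.
 * rtnl_net_dumpid_one() above skips entries with idx < s_idx and bumps
 * idx for each entry visited, letting rtnl_net_dumpid() below continue
 * where the previous skb filled up.
 */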
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	spin_lock_bh(&net_cb.tgt_net->nsid_lock);
	if (net_cb.fillargs.add_ref &&
	    !net_eq(net_cb.ref_net, net_cb.tgt_net) &&
	    !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
		spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
		err = -EAGAIN;
		goto end;
	}
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	if (net_cb.fillargs.add_ref &&
	    !net_eq(net_cb.ref_net, net_cb.tgt_net))
		spin_unlock_bh(&net_cb.ref_net->nsid_lock);
	spin_unlock_bh(&net_cb.tgt_net->nsid_lock);

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err < 0 ? err : skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);
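/*
 * Ordering note: pure_initcall (level 0) runs before core_initcall
 * (level 1), so net_ns_init() has fully set up init_net before, e.g.,
 * net_defaults_init() above registers its pernet operations.
 */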
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* On error, clean up all the namespaces we initialized */
	list_del(&ops->list);
	ops_pre_exit_list(ops, &net_exit_list);
	synchronize_rcu();
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_pre_exit_list(ops, &net_exit_list);
	synchronize_rcu();
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_pre_exit_list(ops, &net_exit_list);
		synchronize_rcu();
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
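/*
 * Minimal usage sketch (hypothetical "foo" subsystem, not part of this
 * file), showing the common .id/.size pattern with per-net data reached
 * via net_generic():
 *
 *	struct foo_net {
 *		int counter;
 *	};
 *
 *	static unsigned int foo_net_id;
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->counter = 0;	// already zeroed by ops_init()
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * A module would call register_pernet_subsys(&foo_net_ops) from its init
 * and unregister_pernet_subsys(&foo_net_ops) from its exit;
 * register_pernet_operations() above allocates *ops->id, and ops_init()
 * kzalloc()s .size bytes per namespace behind the scenes.
 */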
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif