#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * net_sem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and first_device pointer.
 */
DECLARE_RWSEM(net_sem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
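	 *
	 * For reference, the reader side in net_generic() looks
	 * roughly like this:
	 *
	 *	rcu_read_lock();
	 *	ng  = rcu_dereference(net->gen);
	 *	ptr = ng->ptr[id];
	 *	rcu_read_unlock();
	 *
	 * so a reader may still be walking the old array while the
	 * new one is published below, and the old copy must stay
	 * valid until a grace period has elapsed.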
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
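 *
 * As the code below shows, a valid nsid is >= 0; NETNSA_NSID_NOT_ASSIGNED
 * is returned when the peer is already being destroyed or when allocating
 * a new id failed.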
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it is alive: this guarantees
	 * we never hash a peer back into net->netns_ids after it
	 * has just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_sem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	rtnl_lock();
	list_add_tail_rcu(&net->list, &net_namespace_list);
	rtnl_unlock();
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
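	 *
	 * As in cleanup_net(), all the exit methods run first and the
	 * per-net generic storage is freed only in a second pass, so
	 * an exit method can still reach other subsystems' per-net data.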
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&net_sem);
	if (rv < 0)
		goto put_userns;
	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0)
		goto up_read;
	rv = setup_net(net, user_ns);
	mutex_unlock(&net_mutex);
up_read:
	up_read(&net_sem);
	if (rv < 0) {
put_userns:
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from cleanup_net() work, and
	 * that work is the only place that may delete a net from
	 * net_namespace_list. So, while the loop below is executing,
	 * the list may only grow. Thus, we do not use
	 * for_each_net_rcu() or rtnl_lock().
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);	/* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to clean up */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	down_read(&net_sem);
	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache last net. After we unlock rtnl, no new net added to
	 * net_namespace_list can assign an nsid to a net from
	 * net_kill_list (see peernet2id_alloc()), so we skip them
	 * in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links between
	 * net_kill_list's nets, as they have already been deleted
	 * from net_namespace_list. That would be useless anyway, as
	 * their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	rtnl_unlock();

	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);
	up_read(&net_sem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
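 *
 * A minimal, purely illustrative exit path (the foo_* names are made up)
 * would be:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *		net_ns_barrier();
 *		foo_free_shared_state();
 *	}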
 */
void net_ns_barrier(void)
{
	down_write(&net_sem);
	mutex_lock(&net_mutex);
	mutex_unlock(&net_mutex);
	up_write(&net_sem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&net_sem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&net_sem);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error, clean up all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
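				/* The legacy IDA API used here can return
				 * -EAGAIN when no memory is preallocated;
				 * preload with ida_pre_get() and retry.
				 */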
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(first_device, ops);
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&net_sem);
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
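 *
 * Unlike register_pernet_subsys(), ops registered here are kept after
 * the first_device marker in pernet_list, so during setup_net() every
 * subsystem's init method runs before any pernet device's init method
 * (and the exit methods run in the opposite order on teardown).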
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&net_sem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
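
#if 0
/* Illustrative sketch only (not compiled): how a module would typically
 * consume the pernet API above. The foo_* names and struct are made up;
 * only register_pernet_subsys(), unregister_pernet_subsys() and
 * net_generic() are real interfaces.
 */
struct foo_pernet {
	int packets_seen;
};

static unsigned int foo_net_id;

static int __net_init foo_init_net(struct net *net)
{
	struct foo_pernet *fp = net_generic(net, foo_net_id);

	/* The .size storage is allocated and zeroed by ops_init()
	 * before this init method runs.
	 */
	fp->packets_seen = 0;
	return 0;
}

static void __net_exit foo_exit_net(struct net *net)
{
	/* The per-net storage itself is freed by ops_free_list();
	 * only release what foo_init_net() allocated on top of it.
	 */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_pernet),
};

static int __init foo_module_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_module_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}
#endif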