#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
        .count          = REFCOUNT_INIT(1),
        .dev_base_head  = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * net_sem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and first_device pointer.
 */
DECLARE_RWSEM(net_sem);

#define MIN_PERNET_OPS_ID       \
        ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS    13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->s.len = max_gen_ptrs;

        return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id < MIN_PERNET_OPS_ID);

        old_ng = rcu_dereference_protected(net->gen,
                                           lockdep_is_held(&net_mutex));
        if (old_ng->s.len > id) {
                old_ng->ptr[id] = data;
                return 0;
        }

        ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() explores the net->gen array inside an RCU
         * read section. Besides, once set, the net->gen->ptr[x]
         * pointer never changes (see rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
               (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
        ng->ptr[id] = data;

        rcu_assign_pointer(net->gen, ng);
        kfree_rcu(old_ng, s.rcu);
        return 0;
}
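/*
 * Illustrative sketch of how the net->gen slots above are typically used
 * (hypothetical names, not part of this file): a pernet subsystem reserves
 * a slot by registering pernet_operations with ->id and ->size set, which
 * makes ops_init() allocate the per-namespace struct and store it with
 * net_assign_generic(); readers then fetch it locklessly:
 *
 *      static unsigned int foo_net_id;
 *      struct foo_net { int some_setting; };
 *
 *      static struct pernet_operations foo_pernet_ops = {
 *              .id   = &foo_net_id,
 *              .size = sizeof(struct foo_net),
 *      };
 *
 *      struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * See include/net/netns/generic.h for the exact rules readers must follow.
 */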
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err = -ENOMEM;
        void *data = NULL;

        if (ops->id && ops->size) {
                data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        goto out;

                err = net_assign_generic(net, *ops->id, data);
                if (err)
                        goto cleanup;
        }
        err = 0;
        if (ops->init)
                err = ops->init(net);
        if (!err)
                return 0;

cleanup:
        kfree(data);

out:
        return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                kfree(net_generic(net, *ops->id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
        int min = 0, max = 0;

        if (reqid >= 0) {
                min = reqid;
                max = reqid + 1;
        }

        return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
        if (net_eq(net, peer))
                return id ? : NET_ID_ZERO;
        return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
        int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
        bool alloc_it = *alloc;

        *alloc = false;

        /* Magic value for id 0. */
        if (id == NET_ID_ZERO)
                return 0;
        if (id > 0)
                return id;

        if (alloc_it) {
                id = alloc_netid(net, peer, -1);
                *alloc = true;
                return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
        }

        return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
        bool no = false;

        return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
        bool alloc = false, alive = false;
        int id;

        if (refcount_read(&net->count) == 0)
                return NETNSA_NSID_NOT_ASSIGNED;
        spin_lock_bh(&net->nsid_lock);
        /*
         * When peer is obtained from RCU lists, we may race with
         * its cleanup. Check whether it's alive; this guarantees
         * we never hash a peer back to net->netns_ids after it has
         * just been idr_remove()'d from there in cleanup_net().
         */
        if (maybe_get_net(peer))
                alive = alloc = true;
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
                rtnl_net_notifyid(net, RTM_NEWNSID, id);
        if (alive)
                put_net(peer);
        return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
        int id;

        spin_lock_bh(&net->nsid_lock);
        id = __peernet2id(net, peer);
        spin_unlock_bh(&net->nsid_lock);
        return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
        return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
        struct net *peer;

        if (id < 0)
                return NULL;

        rcu_read_lock();
        peer = idr_find(&net->netns_ids, id);
        if (peer)
                peer = maybe_get_net(peer);
        rcu_read_unlock();

        return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
        /* Must be called with net_sem held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
        spin_lock_init(&net->nsid_lock);

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
        rtnl_lock();
        list_add_tail_rcu(&net->list, &net_namespace_list);
        rtnl_unlock();
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}
static int __net_init net_defaults_init_net(struct net *net)
{
        net->core.sysctl_somaxconn = SOMAXCONN;
        return 0;
}

static struct pernet_operations net_defaults_ops = {
        .init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
        if (register_pernet_subsys(&net_defaults_ops))
                panic("Cannot initialize net default settings");

        return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
        return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
        kfree(rcu_access_pointer(net->gen));
        kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
        struct net *ns = p;
        if (ns && refcount_dec_and_test(&ns->passive))
                net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
                        struct user_namespace *user_ns, struct net *old_net)
{
        struct ucounts *ucounts;
        struct net *net;
        int rv;

        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);

        ucounts = inc_net_namespaces(user_ns);
        if (!ucounts)
                return ERR_PTR(-ENOSPC);

        net = net_alloc();
        if (!net) {
                rv = -ENOMEM;
                goto dec_ucounts;
        }
        refcount_set(&net->passive, 1);
        net->ucounts = ucounts;
        get_user_ns(user_ns);

        rv = down_read_killable(&net_sem);
        if (rv < 0)
                goto put_userns;
        rv = mutex_lock_killable(&net_mutex);
        if (rv < 0)
                goto up_read;
        rv = setup_net(net, user_ns);
        mutex_unlock(&net_mutex);
up_read:
        up_read(&net_sem);
        if (rv < 0) {
put_userns:
                put_user_ns(user_ns);
                net_drop_ns(net);
dec_ucounts:
                dec_net_namespaces(ucounts);
                return ERR_PTR(rv);
        }
        return net;
}
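/*
 * Illustrative note (an assumption about the call path, not stated in this
 * file): copy_net_ns() above is reached from the generic nsproxy code when
 * a task asks for a new network namespace, i.e. clone(2) or unshare(2) with
 * CLONE_NEWNET, roughly:
 *
 *      unshare(CLONE_NEWNET);
 *
 * On success the caller's nsproxy points at a freshly setup_net()'d
 * struct net; without CLONE_NEWNET the old namespace is simply
 * reference-counted via get_net(old_net), as the first check in
 * copy_net_ns() shows.
 */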
static void unhash_nsid(struct net *net, struct net *last)
{
        struct net *tmp;
        /* This function is only called from cleanup_net() work,
         * and this work is the only process that may delete
         * a net from net_namespace_list. So, while the below
         * is executing, the list may only grow. Thus, we do not
         * use for_each_net_rcu() or rtnl_lock().
         */
        for_each_net(tmp) {
                int id;

                spin_lock_bh(&tmp->nsid_lock);
                id = __peernet2id(tmp, net);
                if (id >= 0)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
                        rtnl_net_notifyid(tmp, RTM_DELNSID, id);
                if (tmp == last)
                        break;
        }
        spin_lock_bh(&net->nsid_lock);
        idr_destroy(&net->netns_ids);
        spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp, *last;
        struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        down_read(&net_sem);
        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list)
                list_del_rcu(&net->list);
        /* Cache the last net. After we unlock rtnl, no new net
         * added to net_namespace_list can assign an nsid pointer
         * to a net from net_kill_list (see peernet2id_alloc()).
         * So, we skip them in unhash_nsid().
         *
         * Note that unhash_nsid() does not delete nsid links
         * between net_kill_list's nets, as they've already been
         * deleted from net_namespace_list. But this would be
         * useless anyway, as their netns_ids are destroyed there.
         */
        last = list_last_entry(&net_namespace_list, struct net, list);
        rtnl_unlock();

        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                unhash_nsid(net, last);
                list_add_tail(&net->exit_list, &net_exit_list);
        }

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        mutex_unlock(&net_mutex);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        up_read(&net_sem);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                dec_net_namespaces(net->ucounts);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
}
/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
        down_write(&net_sem);
        mutex_lock(&net_mutex);
        mutex_unlock(&net_mutex);
        up_write(&net_sem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
        struct file *file;
        struct ns_common *ns;
        struct net *net;

        file = proc_ns_fget(fd);
        if (IS_ERR(file))
                return ERR_CAST(file);

        ns = get_proc_ns(file_inode(file));
        if (ns->ops == &netns_operations)
                net = get_net(container_of(ns, struct net, ns));
        else
                net = ERR_PTR(-EINVAL);

        fput(file);
        return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
        return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                task_lock(tsk);
                nsproxy = tsk->nsproxy;
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
                task_unlock(tsk);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
        net->ns.ops = &netns_operations;
#endif
        return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
        ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
        .init = net_ns_net_init,
        .exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
        [NETNSA_NONE]   = { .type = NLA_UNSPEC },
        [NETNSA_NSID]   = { .type = NLA_S32 },
        [NETNSA_PID]    = { .type = NLA_U32 },
        [NETNSA_FD]     = { .type = NLA_U32 },
};
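/*
 * Illustrative note (an assumption about the usual userspace pairing, not
 * something stated in this file): rtnl_net_newid() and rtnl_net_getid()
 * below back the RTM_NEWNSID / RTM_GETNSID rtnetlink messages that iproute2
 * typically issues for commands along the lines of:
 *
 *      ip netns set NAME NSID    (RTM_NEWNSID: assign an id to a peer netns)
 *      ip netns list-id          (RTM_GETNSID dump: walk net->netns_ids)
 *
 * A request carries NETNSA_NSID plus either NETNSA_PID or NETNSA_FD to name
 * the peer namespace, matching rtnl_net_policy[] above.
 */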
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct nlattr *nla;
        struct net *peer;
        int nsid, err;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
        if (!tb[NETNSA_NSID]) {
                NL_SET_ERR_MSG(extack, "nsid is missing");
                return -EINVAL;
        }
        nsid = nla_get_s32(tb[NETNSA_NSID]);

        if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
                nla = tb[NETNSA_PID];
        } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else {
                NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
        }
        if (IS_ERR(peer)) {
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
        }

        spin_lock_bh(&net->nsid_lock);
        if (__peernet2id(net, peer) >= 0) {
                spin_unlock_bh(&net->nsid_lock);
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack,
                               "Peer netns already has a nsid assigned");
                goto out;
        }

        err = alloc_netid(net, peer, nsid);
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
                NL_SET_ERR_MSG(extack, "The specified nsid is already used");
        }
out:
        put_net(peer);
        return err;
}

static int rtnl_net_get_size(void)
{
        return NLMSG_ALIGN(sizeof(struct rtgenmsg))
               + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
               ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                         int cmd, struct net *net, int nsid)
{
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;

        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
        if (!nlh)
                return -EMSGSIZE;

        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;

        if (nla_put_s32(skb, NETNSA_NSID, nsid))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct nlattr *nla;
        struct sk_buff *msg;
        struct net *peer;
        int err, id;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
        if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
                nla = tb[NETNSA_PID];
        } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else {
                NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
        }

        if (IS_ERR(peer)) {
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
        }

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out;
        }

        id = peernet2id(net, peer);
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                            RTM_NEWNSID, net, id);
        if (err < 0)
                goto err_out;

        err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
        goto out;

err_out:
        nlmsg_free(msg);
out:
        put_net(peer);
        return err;
}

struct rtnl_net_dump_cb {
        struct net *net;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
        int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
        struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
        int ret;

        if (net_cb->idx < net_cb->s_idx)
                goto cont;

        ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
                            net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                            RTM_NEWNSID, net_cb->net, id);
        if (ret < 0)
                return ret;

cont:
        net_cb->idx++;
        return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct rtnl_net_dump_cb net_cb = {
                .net = net,
                .skb = skb,
                .cb = cb,
                .idx = 0,
                .s_idx = cb->args[0],
        };

        spin_lock_bh(&net->nsid_lock);
        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
        spin_unlock_bh(&net->nsid_lock);

        cb->args[0] = net_cb.idx;
        return skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
        struct sk_buff *msg;
        int err = -ENOMEM;

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg)
                goto out;

        err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
        if (err < 0)
                goto err_out;

        rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
        return;

err_out:
        nlmsg_free(msg);
out:
        rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        down_write(&net_sem);
        if (setup_net(&init_net, &init_user_ns))
                panic("Could not setup the initial network namespace");

        init_net_initialized = true;
        up_write(&net_sem);

        register_pernet_subsys(&net_ns_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
                      RTNL_FLAG_DOIT_UNLOCKED);
        rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
                      RTNL_FLAG_DOIT_UNLOCKED);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_add_tail(&ops->list, list);
                return 0;
        }

        return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_del(&ops->list);
        } else {
                LIST_HEAD(net_exit_list);
                list_add(&init_net.exit_list, &net_exit_list);
                ops_exit_list(ops, &net_exit_list);
                ops_free_list(ops, &net_exit_list);
        }
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);
static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
again:
                error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID,
                                          ops->id);
                if (error < 0) {
                        if (error == -EAGAIN) {
                                ida_pre_get(&net_generic_ids, GFP_KERNEL);
                                goto again;
                        }
                        return error;
                }
                max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_remove(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        down_write(&net_sem);
        error = register_pernet_operations(first_device, ops);
        up_write(&net_sem);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        down_write(&net_sem);
        unregister_pernet_operations(ops);
        up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
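/*
 * Illustrative sketch of the registration API above, continuing the
 * hypothetical foo_net example from the comment near net_assign_generic()
 * (none of these names exist in the tree): a module that needs
 * per-namespace state usually pairs register_pernet_subsys() in its init
 * path with unregister_pernet_subsys() in its exit path:
 *
 *      static int __net_init foo_net_init(struct net *net)
 *      {
 *              struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *              fn->some_setting = 0;
 *              return 0;
 *      }
 *
 *      static void __net_exit foo_net_exit(struct net *net)
 *      {
 *              ... undo whatever foo_net_init() set up for this netns ...
 *      }
 *
 *      static struct pernet_operations foo_pernet_ops = {
 *              .init = foo_net_init,
 *              .exit = foo_net_exit,
 *              .id   = &foo_net_id,
 *              .size = sizeof(struct foo_net),
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              return register_pernet_subsys(&foo_pernet_ops);
 *      }
 *
 *      static void __exit foo_exit(void)
 *      {
 *              unregister_pernet_subsys(&foo_pernet_ops);
 *      }
 *
 * Unregistering runs the ->exit hook for every live namespace, so the
 * module text must stay around until it returns (see also
 * net_ns_barrier() above).
 */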
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        down_write(&net_sem);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        up_write(&net_sem);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        down_write(&net_sem);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
        struct net *net = NULL;
        struct nsproxy *nsproxy;

        task_lock(task);
        nsproxy = task->nsproxy;
        if (nsproxy)
                net = get_net(nsproxy->net_ns);
        task_unlock(task);

        return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
        return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
        put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct net *net = to_net_ns(ns);

        if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        put_net(nsproxy->net_ns);
        nsproxy->net_ns = get_net(net);
        return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
        return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
        .name           = "net",
        .type           = CLONE_NEWNET,
        .get            = netns_get,
        .put            = netns_put,
        .install        = netns_install,
        .owner          = netns_owner,
};
#endif