/* xref: /openbmc/linux/net/core/net_namespace.c (revision 19efbd93) */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
static unsigned nr_sync_pernet_ops;
/*
 * net_sem: protects: pernet_list, net_generic_ids, nr_sync_pernet_ops,
 * init_net_initialized and first_device pointer.
 */
DECLARE_RWSEM(net_sem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_sem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() walks the net->gen array inside an RCU
	 * read-side section. Moreover, once set, a net->gen->ptr[x]
	 * pointer never changes (see the rules in netns/generic.h).
	 *
	 * Hence we can simply duplicate this array and schedule
	 * the old copy for kfree() after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id, so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it instead.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool
 * *alloc is set to true, so the caller knows that the new id must be
 * advertised via rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Checking whether it is alive guarantees that
	 * we never hash a peer back into net->netns_ids after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);
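
/* Usage sketch (illustrative, not part of this file): rtnetlink advertises
 * the namespace a link lives in roughly this way, allocating an id on
 * demand with peernet2id_alloc() and emitting it as a netlink attribute:
 *
 *	int id = peernet2id_alloc(dev_net(dev), link_net);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 *
 * peernet2id() is the read-only variant for callers that must not create
 * a new id as a side effect.
 */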

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_sem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	rtnl_lock();
	list_add_tail_rcu(&net->list, &net_namespace_list);
	rtnl_unlock();
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
	.async = true,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	unsigned write;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);
again:
	write = READ_ONCE(nr_sync_pernet_ops);
	if (write)
		rv = down_write_killable(&net_sem);
	else
		rv = down_read_killable(&net_sem);
	if (rv < 0)
		goto put_userns;

	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
		up_read(&net_sem);
		goto again;
	}
	rv = setup_net(net, user_ns);

	if (write)
		up_write(&net_sem);
	else
		up_read(&net_sem);

	if (rv < 0) {
put_userns:
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and that work is the only context that may delete a net
	 * from net_namespace_list. So, while the loop below is
	 * executing, the list can only grow. Thus, we do not need
	 * for_each_net_rcu() or rtnl_lock().
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);
	unsigned write;

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
again:
	write = READ_ONCE(nr_sync_pernet_ops);
	if (write)
		down_write(&net_sem);
	else
		down_read(&net_sem);

	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
		up_read(&net_sem);
		goto again;
	}

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock rtnl, no net newly
	 * added to net_namespace_list can assign an nsid to a net
	 * from net_kill_list (see peernet2id_alloc()), so we can
	 * skip those nets in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they have already been
	 * deleted from net_namespace_list. But that would be
	 * useless anyway, as their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	rtnl_unlock();

	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	if (write)
		up_write(&net_sem);
	else
		up_read(&net_sem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&net_sem);
	up_write(&net_sem);
}
EXPORT_SYMBOL(net_ns_barrier);
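
/* Usage sketch (hypothetical module "foo", not part of this file): when a
 * module's teardown must not proceed while a queued cleanup_net() may still
 * be invoking netns ->exit ops that use the module's state, the barrier
 * makes the exit path wait for that work to finish:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_unregister_hooks();
 *		net_ns_barrier();
 *		kfree(foo_shared_state);
 *	}
 */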

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
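
/* Typical caller pattern (sketch only): fd refers to /proc/<pid>/ns/net or
 * a bind-mounted netns file; on success the caller owns a reference and
 * must drop it with put_net():
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */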

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
	.async = true,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&net_sem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&net_sem);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error, clean up all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	} else if (!ops->async) {
		pr_info_once("Pernet operations %ps are sync.\n", ops);
		nr_sync_pernet_ops++;
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!ops->async)
		BUG_ON(nr_sync_pernet_ops-- == 0);
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, the init function is called for every existing
 *	network namespace, allowing kernel modules to have a race-free
 *	view of the set of network namespaces.
 *
 *	When a new network namespace is created, all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed, all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(first_device, ops);
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
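
/* Registration sketch (hypothetical subsystem "foo", not part of this file).
 * With ->id and ->size set, ops_init() above allocates ->size zeroed bytes
 * for each namespace and net_generic(net, foo_net_id) retrieves them; with
 * ->async set, the ops do not force the slow, serialized setup path:
 *
 *	struct foo_net {
 *		int some_setting;
 *	};
 *	static unsigned int foo_net_id;
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_setting = 1;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *		.async = true,
 *	};
 *
 *	...
 *	err = register_pernet_subsys(&foo_net_ops);
 */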

/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&net_sem);
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *	register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, the init function is called for every existing
 *	network namespace, allowing kernel modules to have a race-free
 *	view of the set of network namespaces.
 *
 *	When a new network namespace is created, all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed, all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&net_sem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif