/* net/core/net_namespace.c (xref: /openbmc/linux, revision 447cd7a0) */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
/* Used only if there are !async pernet_operations registered */
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
static unsigned nr_sync_pernet_ops;
/*
 * net_sem protects: pernet_list, net_generic_ids, nr_sync_pernet_ops,
 * init_net_initialized and the first_device pointer. Namespace setup and
 * teardown take it for reading; (un)registration of pernet operations
 * takes it for writing. Operations that are not marked async additionally
 * serialize setup_net()/cleanup_net() via net_mutex.
 */
DECLARE_RWSEM(net_sem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_sem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see the rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
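
/*
 * Illustrative sketch, not part of the original file: the typical way a
 * pernet subsystem pairs an id/size registration with net_generic()
 * lookups of the area installed via net_assign_generic(). All "foo_*"
 * names below are hypothetical; the block is compiled out.
 */
#if 0
struct foo_net {
	int some_counter;
};

static unsigned int foo_net_id;

static struct pernet_operations foo_pernet_ops = {
	.id	= &foo_net_id,
	.size	= sizeof(struct foo_net),
};

static struct foo_net *foo_pernet(struct net *net)
{
	/* Looks up the zeroed, kzalloc'ed area that ops_init() assigned. */
	return net_generic(net, foo_net_id);
}
#endif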

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with its
	 * cleanup. Check whether it is still alive: this guarantees we
	 * never hash a peer back into net->netns_ids after it has just
	 * been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
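
/*
 * Sketch of a hypothetical caller, not taken from this file: dumpers that
 * expose a peer namespace to user space typically emit the id returned
 * here as an s32 attribute (rtnetlink's link dump does this with
 * IFLA_LINK_NETNSID). "sketch_put_peer_nsid" is an assumed name; the
 * block is compiled out.
 */
#if 0
static int sketch_put_peer_nsid(struct sk_buff *skb, struct net *net,
				struct net *peer)
{
	int id = peernet2id_alloc(net, peer);

	/* NETNSA_NSID_NOT_ASSIGNED (-1) is still a valid s32 to emit. */
	return nla_put_s32(skb, IFLA_LINK_NETNSID, id);
}
#endif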

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

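/* On success the caller owns a reference on the result (if non-NULL) and
 * must drop it with put_net().
 */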
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_sem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	rtnl_lock();
	list_add_tail_rcu(&net->list, &net_namespace_list);
	rtnl_unlock();
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

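/* net->count tracks user-visible references to the namespace, while
 * net->passive keeps the struct net memory itself alive for kernel-internal
 * users that may still dereference it after the last user reference is
 * gone; net_free() runs only when the last passive reference drops here.
 */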
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

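/* Reached via clone(2)/unshare(2): without CLONE_NEWNET we just take a
 * reference on the old namespace; with it, a fresh namespace is allocated
 * and initialized under net_sem.
 */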
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&net_sem);
	if (rv < 0)
		goto put_userns;
	if (nr_sync_pernet_ops) {
		rv = mutex_lock_killable(&net_mutex);
		if (rv < 0)
			goto up_read;
	}
	rv = setup_net(net, user_ns);
	if (nr_sync_pernet_ops)
		mutex_unlock(&net_mutex);
up_read:
	up_read(&net_sem);
	if (rv < 0) {
put_userns:
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and that work is the only place that may delete a net from
	 * net_namespace_list. So, while the loop below is executing,
	 * the list may only grow. Thus, we do not need
	 * for_each_net_rcu() or rtnl_lock() here.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	down_read(&net_sem);
	if (nr_sync_pernet_ops)
		mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock rtnl, no net newly added
	 * to net_namespace_list can assign an nsid to a net from
	 * net_kill_list (see peernet2id_alloc()), so we skip such nets
	 * in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links between
	 * net_kill_list's nets, as those have already been deleted from
	 * net_namespace_list. That would be useless anyway, as their
	 * netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	rtnl_unlock();

	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	if (nr_sync_pernet_ops)
		mutex_unlock(&net_mutex);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&net_sem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&net_sem);
	mutex_lock(&net_mutex);
	mutex_unlock(&net_mutex);
	up_write(&net_sem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

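/* The fd is typically obtained by opening /proc/<pid>/ns/net, or one of
 * the files iproute2 bind-mounts under /var/run/netns/.
 */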
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

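/* RTM_NEWNSID handler: this is what e.g. "ip netns set NAME NSID" sends to
 * assign an id for a peer namespace inside the caller's namespace.
 */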
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

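/* RTM_GETNSID dump handler, e.g. backing "ip netns list-id". */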
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&net_sem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&net_sem);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	} else if (!ops->async) {
		pr_info_once("Pernet operations %ps are sync.\n", ops);
		nr_sync_pernet_ops++;
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!ops->async)
		BUG_ON(nr_sync_pernet_ops-- == 0);
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(first_device, ops);
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
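
/*
 * End-to-end sketch (hypothetical, not from this file) of typical
 * module-side usage: register at load, unregister at unload, with
 * per-namespace state kept behind an id/size pair. All "foo_*" names
 * are assumptions for illustration; the block is compiled out.
 */
#if 0
struct foo_net {
	unsigned long pkts_seen;
};

static unsigned int foo_net_id;

static int __net_init foo_init_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->pkts_seen = 0;	/* area was already kzalloc'ed by ops_init() */
	return 0;
}

static void __net_exit foo_exit_net(struct net *net)
{
	/* release resources; the foo_net area itself is freed by ops_free() */
}

static struct pernet_operations foo_net_ops = {
	.init	= foo_init_net,
	.exit	= foo_exit_net,
	.id	= &foo_net_id,
	.size	= sizeof(struct foo_net),
	.async	= true,		/* does not need net_mutex serialization */
};

static int __init foo_module_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_module_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}
#endif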

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&net_sem);
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&net_sem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif