xref: /openbmc/linux/net/core/net_namespace.c (revision 2b035b39)
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static void unregister_netdevices(struct net *net, struct list_head *list)
{
	struct net_device *dev;
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 */
	for_each_netdev_reverse(net, dev) {
		if (dev->rtnl_link_ops)
			dev->rtnl_link_ops->dellink(dev, list);
		else
			unregister_netdevice_queue(dev, list);
	}
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with net_mutex held */
	struct pernet_operations *ops;
	int error = 0;

	atomic_set(&net->count, 1);

#ifdef NETNS_REFCNT_DEBUG
	atomic_set(&net->use_count, 0);
#endif

	list_for_each_entry(ops, &pernet_list, list) {
		if (ops->init) {
			error = ops->init(net);
			if (error < 0)
				goto out_undo;
		}
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
		if (ops->exit)
			ops->exit(net);
		if (&ops->list == first_device) {
			LIST_HEAD(dev_kill_list);
			rtnl_lock();
			unregister_netdevices(net, &dev_kill_list);
			unregister_netdevice_many(&dev_kill_list);
			rtnl_unlock();
		}
	}

	rcu_barrier();
	goto out;
}

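/* Allocate a zeroed net_generic array with room for INITIAL_NET_GEN_PTRS
 * generic pointers; net_assign_generic() grows it on demand later.
 */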
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

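/* Allocate a struct net together with its initial net_generic array.
 * Returns NULL on allocation failure; the caller still runs setup_net().
 */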
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

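/* Release the memory of a dead network namespace.  With NETNS_REFCNT_DEBUG
 * the free is refused and reported if use_count references are still held.
 */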
static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
#endif
	kfree(net->gen);
	kmem_cache_free(net_cachep, net);
}

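/* Allocate and set up a new namespace and make it visible on
 * net_namespace_list.  Returns the new net or an ERR_PTR on failure.
 */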
static struct net *net_create(void)
{
	struct net *net;
	int rv;

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
	mutex_lock(&net_mutex);
	rv = setup_net(net);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		net_free(net);
		return ERR_PTR(rv);
	}
	return net;
}

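/* clone()/unshare() entry point: without CLONE_NEWNET the caller keeps a
 * reference to the old namespace, otherwise a fresh one is created.
 */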
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);
	return net_create();
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

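/* Work function that tears down every namespace queued on cleanup_list:
 * unhash them from net_namespace_list, run the pernet exit methods, then
 * free the structures.
 */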
static void cleanup_net(struct work_struct *work)
{
	struct pernet_operations *ops;
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);

	/* Atomically snapshot the list of namespaces to clean up */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to happen before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit) {
			list_for_each_entry(net, &net_kill_list, cleanup_list)
				ops->exit(net);
		}
		if (&ops->list == first_device) {
			LIST_HEAD(dev_kill_list);
			rtnl_lock();
			list_for_each_entry(net, &net_kill_list, cleanup_list)
				unregister_netdevices(net, &dev_kill_list);
			unregister_netdevice_many(&dev_kill_list);
			rtnl_unlock();
		}
	}

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_kill_list, cleanup_list) {
		list_del_init(&net->cleanup_list);
		net_free(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

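/* Called when the last reference to a namespace is dropped.  Teardown has
 * to sleep, so the namespace is queued for cleanup_net() to handle in
 * process context on the netns workqueue.
 */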
void __put_net(struct net *net)
{
	/* Clean up the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}
#endif

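/* Look up the network namespace of the task with the given pid and take a
 * reference on it.  Returns ERR_PTR(-ESRCH) if the task (or its nsproxy)
 * cannot be found.
 */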
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Look up the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		nsproxy = task_nsproxy(tsk);
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

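/* Boot-time setup: create the struct net slab cache and the netns cleanup
 * workqueue (under CONFIG_NET_NS), then initialize and publish init_net.
 */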
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
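/* Add @ops to @list and run its init method on every namespace that already
 * exists.  On failure the namespaces that were initialized are unwound with
 * exit().  Must be called with net_mutex held.
 */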
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	struct net *net, *undo_net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init) {
		for_each_net(net) {
			error = ops->init(net);
			if (error)
				goto out_undo;
		}
	}
	return 0;

out_undo:
	/* If I have an error clean up all namespaces I initialized */
	list_del(&ops->list);
	if (ops->exit) {
		for_each_net(undo_net) {
			if (net_eq(undo_net, net))
				goto undone;
			ops->exit(undo_net);
		}
	}
undone:
	return error;
}

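/* Remove @ops from the pernet list and run its exit method on every existing
 * namespace.  Must be called with net_mutex held.
 */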
static void unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;

	list_del(&ops->list);
	if (ops->exit)
		for_each_net(net)
			ops->exit(net);
}

#else

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	if (ops->init == NULL)
		return 0;
	return ops->init(&init_net);
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	if (ops->exit)
		ops->exit(&init_net);
}
#endif

static DEFINE_IDA(net_generic_ids);

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
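
/*
 * Illustrative sketch (not part of this file): a typical caller bundles its
 * per-namespace setup and teardown into a struct pernet_operations and
 * registers it once, e.g. from its module init function.  The foo_* names
 * below are hypothetical placeholders.
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *	};
 *
 *	err = register_pernet_subsys(&foo_net_ops);
 */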

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

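/* Like register_pernet_subsys(), but also allocates an id from
 * net_generic_ids for use with net_assign_generic()/net_generic().
 */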
int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
{
	int rv;

	mutex_lock(&net_mutex);
again:
	rv = ida_get_new_above(&net_generic_ids, 1, id);
	if (rv < 0) {
		if (rv == -EAGAIN) {
			ida_pre_get(&net_generic_ids, GFP_KERNEL);
			goto again;
		}
		goto out;
	}
	rv = register_pernet_operations(first_device, ops);
	if (rv < 0)
		ida_remove(&net_generic_ids, *id);
out:
	mutex_unlock(&net_mutex);
	return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);

void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	ida_remove(&net_generic_ids, id);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

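/* Like register_pernet_device(), but also allocates an id from
 * net_generic_ids for use with net_assign_generic()/net_generic().
 */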
int register_pernet_gen_device(int *id, struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
again:
	error = ida_get_new_above(&net_generic_ids, 1, id);
	if (error) {
		if (error == -EAGAIN) {
			ida_pre_get(&net_generic_ids, GFP_KERNEL);
			goto again;
		}
		goto out;
	}
	error = register_pernet_operations(&pernet_list, ops);
	if (error)
		ida_remove(&net_generic_ids, *id);
	else if (first_device == &pernet_list)
		first_device = &ops->list;
out:
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_device);

/**
 *      unregister_pernet_device - unregister a network namespace device
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	ida_remove(&net_generic_ids, id);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_gen_device);

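/* RCU callback: free a net_generic array that net_assign_generic() has
 * replaced with a larger copy.
 */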
static void net_generic_release(struct rcu_head *rcu)
{
	struct net_generic *ng;

	ng = container_of(rcu, struct net_generic, rcu);
	kfree(ng);
}

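/* Publish @data in net->gen under the 1-based slot @id, growing the array
 * (with an RCU-deferred free of the old copy) when @id is beyond its
 * current length.  Readers fetch the pointer back with net_generic(net, id).
 */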
int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	ng = old_ng = net->gen;
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section.  Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	call_rcu(&old_ng->rcu, net_generic_release);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}
EXPORT_SYMBOL_GPL(net_assign_generic);
579