/* xref: /openbmc/linux/kernel/user_namespace.c (revision 5927145e) */
/*
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation, version 2 of the
 *  License.
 */

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);

static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
	return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}

static void dec_user_namespaces(struct ucounts *ucounts)
{
	return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
}

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything, as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	struct ucounts *ucounts;
	int ret, i;

	ret = -ENOSPC;
	if (parent_ns->level > 32)
		goto fail;

	ucounts = inc_user_namespaces(parent_ns, owner);
	if (!ucounts)
		goto fail;

	/*
	 * Verify that we cannot violate the policy of which files
	 * may be accessed that is specified by the root directory,
	 * by verifying that the root directory is at the root of the
	 * mount namespace which allows all files to be accessed.
	 */
	ret = -EPERM;
	if (current_chrooted())
		goto fail_dec;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	ret = -EPERM;
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		goto fail_dec;

	ret = -ENOMEM;
	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		goto fail_dec;

	ret = ns_alloc_inum(&ns->ns);
	if (ret)
		goto fail_free;
	ns->ns.ops = &userns_operations;

	atomic_set(&ns->count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;
	INIT_WORK(&ns->work, free_user_ns);
	for (i = 0; i < UCOUNT_COUNTS; i++) {
		ns->ucount_max[i] = INT_MAX;
	}
	ns->ucounts = ucounts;

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_PERSISTENT_KEYRINGS
	init_rwsem(&ns->persistent_keyring_register_sem);
#endif
	ret = -ENOMEM;
	if (!setup_userns_sysctls(ns))
		goto fail_keyring;

	set_cred_user_ns(new, ns);
	return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
	ns_free_inum(&ns->ns);
fail_free:
	kmem_cache_free(user_ns_cachep, ns);
fail_dec:
	dec_user_namespaces(ucounts);
fail:
	return ret;
}
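
/*
 * Usage sketch (illustrative): create_user_ns() runs on the copy_creds()
 * path of clone(2) and unshare(2).  A minimal userspace trigger is
 *
 *	unshare(CLONE_NEWUSER);
 *
 * after which the caller holds a full capability set, but only over the
 * new namespace, and no ids are mapped until uid_map/gid_map are written.
 */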

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
	struct cred *cred;
	int err = -ENOMEM;

	if (!(unshare_flags & CLONE_NEWUSER))
		return 0;

	cred = prepare_creds();
	if (cred) {
		err = create_user_ns(cred);
		if (err)
			put_cred(cred);
		else
			*new_cred = cred;
	}

	return err;
}

static void free_user_ns(struct work_struct *work)
{
	struct user_namespace *parent, *ns =
		container_of(work, struct user_namespace, work);

	do {
		struct ucounts *ucounts = ns->ucounts;
		parent = ns->parent;
		if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->gid_map.forward);
			kfree(ns->gid_map.reverse);
		}
		if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->uid_map.forward);
			kfree(ns->uid_map.reverse);
		}
		if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->projid_map.forward);
			kfree(ns->projid_map.reverse);
		}
		retire_userns_sysctls(ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
		key_put(ns->persistent_keyring_register);
#endif
		ns_free_inum(&ns->ns);
		kmem_cache_free(user_ns_cachep, ns);
		dec_user_namespaces(ucounts);
		ns = parent;
	} while (atomic_dec_and_test(&parent->count));
}

void __put_user_ns(struct user_namespace *ns)
{
	schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);

/**
 * idmap_key struct holds the information necessary to find an idmapping in a
 * sorted idmap array. It is passed to cmp_map_id() as first argument.
 */
struct idmap_key {
	bool map_up; /* true  -> id from kid; false -> kid from id */
	u32 id; /* id to find */
	u32 count; /* == 0 unless used with map_id_range_down() */
};

/**
 * cmp_map_id - Function to be passed to bsearch() to find the requested
 * idmapping. Expects struct idmap_key to be passed via @k.
 */
static int cmp_map_id(const void *k, const void *e)
{
	u32 first, last, id2;
	const struct idmap_key *key = k;
	const struct uid_gid_extent *el = e;

	id2 = key->id + key->count - 1;

	/* handle map_id_{down,up}() */
	if (key->map_up)
		first = el->lower_first;
	else
		first = el->first;

	last = first + el->count - 1;

	if (key->id >= first && key->id <= last &&
	    (id2 >= first && id2 <= last))
		return 0;

	if (key->id < first || id2 < first)
		return -1;

	return 1;
}
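
/*
 * Example (illustrative): for a key { .map_up = false, .id = 1000,
 * .count = 1 } and an extent { .first = 0, .lower_first = 100000,
 * .count = 65536 }, the extent covers ids [0, 65535], so cmp_map_id()
 * returns 0 (a match); a key id of 70000 would return 1 instead.
 */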

/**
 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	struct idmap_key key;

	key.map_up = false;
	key.count = count;
	key.id = id;

	return bsearch(&key, map->forward, extents,
		       sizeof(struct uid_gid_extent), cmp_map_id);
}

/**
 * map_id_range_down_base - Find idmap via linear search in the static extent
 * array. Can only be called if the number of mappings is less than or equal
 * to UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			return &map->extent[idx];
	}
	return NULL;
}

static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	struct uid_gid_extent *extent;
	unsigned extents = map->nr_extents;
	smp_rmb();

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		extent = map_id_range_down_base(extents, map, id, count);
	else
		extent = map_id_range_down_max(extents, map, id, count);

	/* Map the id or note failure */
	if (extent)
		id = (id - extent->first) + extent->lower_first;
	else
		id = (u32) -1;

	return id;
}

static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	return map_id_range_down(map, id, 1);
}
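
/*
 * Worked example (illustrative): with a single installed extent
 * { .first = 0, .lower_first = 100000, .count = 65536 },
 * map_id_down(map, 1000) returns (1000 - 0) + 100000 = 101000, while an
 * id outside every extent, such as 70000, yields (u32) -1.
 */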

/**
 * map_id_up_base - Find idmap via linear search in the static extent array.
 * Can only be called if the number of mappings is less than or equal to
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
{
	unsigned idx;
	u32 first, last;

	/* Find the matching extent */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].lower_first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			return &map->extent[idx];
	}
	return NULL;
}

/**
 * map_id_up_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
{
	struct idmap_key key;

	key.map_up = true;
	key.count = 1;
	key.id = id;

	return bsearch(&key, map->reverse, extents,
		       sizeof(struct uid_gid_extent), cmp_map_id);
}

static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	struct uid_gid_extent *extent;
	unsigned extents = map->nr_extents;
	smp_rmb();

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		extent = map_id_up_base(extents, map, id);
	else
		extent = map_id_up_max(extents, map, id);

	/* Map the id or note failure */
	if (extent)
		id = (id - extent->lower_first) + extent->first;
	else
		id = (u32) -1;

	return id;
}
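
/*
 * Worked example (illustrative): with the extent from the previous
 * example, map_id_up(map, 101000) returns (101000 - 100000) + 0 = 1000,
 * exactly inverting map_id_down(); unmapped kernel ids yield (u32) -1.
 */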

/**
 *	make_kuid - Map a user-namespace uid pair into a kuid.
 *	@ns:  User namespace that the uid is in
 *	@uid: User identifier
 *
 *	Maps a user-namespace uid pair into a kernel internal kuid,
 *	and returns that kuid.
 *
 *	When there is no mapping defined for the user-namespace uid
 *	pair INVALID_UID is returned.  Callers are expected to test
 *	for and handle INVALID_UID being returned.  INVALID_UID
 *	may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 *	from_kuid - Create a uid from a kuid user-namespace pair.
 *	@targ: The user namespace we want a uid in.
 *	@kuid: The kernel internal uid to start with.
 *
 *	Map @kuid into the user-namespace specified by @targ and
 *	return the resulting uid.
 *
 *	There is always a mapping into the initial user_namespace.
 *
 *	If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 *	from_kuid_munged - Create a uid from a kuid user-namespace pair.
 *	@targ: The user namespace we want a uid in.
 *	@kuid: The kernel internal uid to start with.
 *
 *	Map @kuid into the user-namespace specified by @targ and
 *	return the resulting uid.
 *
 *	There is always a mapping into the initial user_namespace.
 *
 *	Unlike from_kuid, from_kuid_munged never fails and always
 *	returns a valid uid.  This makes from_kuid_munged appropriate
 *	for use in syscalls like stat and getuid where failing the
 *	system call and failing to provide a valid uid are not
 *	options.
 *
 *	If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
	uid_t uid;
	uid = from_kuid(targ, kuid);

	if (uid == (uid_t) -1)
		uid = overflowuid;
	return uid;
}
EXPORT_SYMBOL(from_kuid_munged);
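
/*
 * Example (illustrative): if @targ maps only uids 0-65535, from_kuid()
 * on an unmapped kuid returns (uid_t) -1, while from_kuid_munged()
 * returns overflowuid, which defaults to 65534 and is tunable via
 * /proc/sys/kernel/overflowuid.
 */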

/**
 *	make_kgid - Map a user-namespace gid pair into a kgid.
 *	@ns:  User namespace that the gid is in
 *	@gid: group identifier
 *
 *	Maps a user-namespace gid pair into a kernel internal kgid,
 *	and returns that kgid.
 *
 *	When there is no mapping defined for the user-namespace gid
 *	pair INVALID_GID is returned.  Callers are expected to test
 *	for and handle INVALID_GID being returned.  INVALID_GID may be
 *	tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 *	from_kgid - Create a gid from a kgid user-namespace pair.
 *	@targ: The user namespace we want a gid in.
 *	@kgid: The kernel internal gid to start with.
 *
 *	Map @kgid into the user-namespace specified by @targ and
 *	return the resulting gid.
 *
 *	There is always a mapping into the initial user_namespace.
 *
 *	If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Map the gid from a global kernel gid */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 *	from_kgid_munged - Create a gid from a kgid user-namespace pair.
 *	@targ: The user namespace we want a gid in.
 *	@kgid: The kernel internal gid to start with.
 *
 *	Map @kgid into the user-namespace specified by @targ and
 *	return the resulting gid.
 *
 *	There is always a mapping into the initial user_namespace.
 *
 *	Unlike from_kgid, from_kgid_munged never fails and always
 *	returns a valid gid.  This makes from_kgid_munged appropriate
 *	for use in syscalls like stat and getgid where failing the
 *	system call and failing to provide a valid gid are not options.
 *
 *	If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
	gid_t gid;
	gid = from_kgid(targ, kgid);

	if (gid == (gid_t) -1)
		gid = overflowgid;
	return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 *	make_kprojid - Map a user-namespace projid pair into a kprojid.
 *	@ns:  User namespace that the projid is in
 *	@projid: Project identifier
 *
 *	Maps a user-namespace projid pair into a kernel internal kprojid,
 *	and returns that kprojid.
 *
 *	When there is no mapping defined for the user-namespace projid
 *	pair INVALID_PROJID is returned.  Callers are expected to test
 *	for and handle INVALID_PROJID being returned.  INVALID_PROJID
 *	may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
	/* Map the projid to a global kernel projid */
	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 *	from_kprojid - Create a projid from a kprojid user-namespace pair.
 *	@targ: The user namespace we want a projid in.
 *	@kprojid: The kernel internal project identifier to start with.
 *
 *	Map @kprojid into the user-namespace specified by @targ and
 *	return the resulting projid.
 *
 *	There is always a mapping into the initial user_namespace.
 *
 *	If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
	/* Map the projid from a global kernel projid */
	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 *	from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 *	@targ: The user namespace we want a projid in.
 *	@kprojid: The kernel internal projid to start with.
 *
 *	Map @kprojid into the user-namespace specified by @targ and
 *	return the resulting projid.
 *
 *	There is always a mapping into the initial user_namespace.
 *
 *	Unlike from_kprojid, from_kprojid_munged never fails and always
 *	returns a valid projid.  This makes from_kprojid_munged
 *	appropriate for use in syscalls like stat where failing the
 *	system call and failing to provide a valid projid are not
 *	options.
 *
 *	If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
	projid_t projid;
	projid = from_kprojid(targ, kprojid);

	if (projid == (projid_t) -1)
		projid = OVERFLOW_PROJID;
	return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);


static int uid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	uid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		extent->first,
		lower,
		extent->count);

	return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	gid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		extent->first,
		lower,
		extent->count);

	return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	projid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		extent->first,
		lower,
		extent->count);

	return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
		     struct uid_gid_map *map)
{
	loff_t pos = *ppos;
	unsigned extents = map->nr_extents;
	smp_rmb();

	if (pos >= extents)
		return NULL;

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return &map->extent[pos];

	return &map->forward[pos];
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
	return;
}

const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};
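
/*
 * Example (illustrative): with a typical single-extent container mapping
 * installed, reading /proc/<pid>/uid_map through these seq_operations
 * prints one "first lower_first count" line per extent, e.g.:
 *
 *	         0     100000      65536
 */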

static bool mappings_overlap(struct uid_gid_map *new_map,
			     struct uid_gid_extent *extent)
{
	u32 upper_first, lower_first, upper_last, lower_last;
	unsigned idx;

	upper_first = extent->first;
	lower_first = extent->lower_first;
	upper_last = upper_first + extent->count - 1;
	lower_last = lower_first + extent->count - 1;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		u32 prev_upper_first, prev_lower_first;
		u32 prev_upper_last, prev_lower_last;
		struct uid_gid_extent *prev;

		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			prev = &new_map->extent[idx];
		else
			prev = &new_map->forward[idx];

		prev_upper_first = prev->first;
		prev_lower_first = prev->lower_first;
		prev_upper_last = prev_upper_first + prev->count - 1;
		prev_lower_last = prev_lower_first + prev->count - 1;

		/* Does the upper range intersect a previous extent? */
		if ((prev_upper_first <= upper_last) &&
		    (prev_upper_last >= upper_first))
			return true;

		/* Does the lower range intersect a previous extent? */
		if ((prev_lower_first <= lower_last) &&
		    (prev_lower_last >= lower_first))
			return true;
	}
	return false;
}
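
/*
 * Example (illustrative): if new_map already holds
 * { .first = 0, .lower_first = 100000, .count = 1000 }, a new extent
 * { .first = 500, .lower_first = 200000, .count = 100 } is rejected:
 * its upper range [500, 599] intersects the previous upper range
 * [0, 999], even though the lower ranges are disjoint.
 */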

/**
 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
 * Takes care to allocate a 4K block of memory if the number of mappings exceeds
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
{
	struct uid_gid_extent *dest;

	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
		struct uid_gid_extent *forward;

		/* Allocate memory for 340 mappings. */
		forward = kmalloc(sizeof(struct uid_gid_extent) *
				 UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL);
		if (!forward)
			return -ENOMEM;

		/* Copy over memory. Only set up memory for the forward pointer.
		 * Defer the memory setup for the reverse pointer.
		 */
		memcpy(forward, map->extent,
		       map->nr_extents * sizeof(map->extent[0]));

		map->forward = forward;
		map->reverse = NULL;
	}

	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
		dest = &map->extent[map->nr_extents];
	else
		dest = &map->forward[map->nr_extents];

	*dest = *extent;
	map->nr_extents++;
	return 0;
}
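
/*
 * Sizing note (illustrative): struct uid_gid_extent is three u32s, i.e.
 * 12 bytes, so the 340 extents of UID_GID_MAP_MAX_EXTENTS occupy
 * 340 * 12 = 4080 bytes and fit within the single 4K allocation above.
 */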

/* cmp function to sort() forward mappings */
static int cmp_extents_forward(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->first < e2->first)
		return -1;

	if (e1->first > e2->first)
		return 1;

	return 0;
}

/* cmp function to sort() reverse mappings */
static int cmp_extents_reverse(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->lower_first < e2->lower_first)
		return -1;

	if (e1->lower_first > e2->lower_first)
		return 1;

	return 0;
}

/**
 * sort_idmaps - Sorts an array of idmap entries.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int sort_idmaps(struct uid_gid_map *map)
{
	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return 0;

	/* Sort forward array. */
	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_forward, NULL);

	/* Only copy the memory from forward we actually need. */
	map->reverse = kmemdup(map->forward,
			       map->nr_extents * sizeof(struct uid_gid_extent),
			       GFP_KERNEL);
	if (!map->reverse)
		return -ENOMEM;

	/* Sort reverse array. */
	sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_reverse, NULL);

	return 0;
}
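
/*
 * Example (illustrative): given extents A = { .first = 500,
 * .lower_first = 100000 } and B = { .first = 0, .lower_first = 200000 },
 * the forward array sorts to [B, A] (ordered by ->first, for
 * map_id_range_down_max()) and the reverse copy sorts to [A, B]
 * (ordered by ->lower_first, for map_id_up_max()).
 */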

static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent extent;
	char *kbuf = NULL, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() guarantees that we don't have crazy
	 * architectures returning stale data.
	 */
	mutex_lock(&userns_state_mutex);

	memset(&new_map, 0, sizeof(struct uid_gid_map));

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Only allow < page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf)) {
		ret = PTR_ERR(kbuf);
		kbuf = NULL;
		goto out;
	}

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	for (; pos; pos = next_line) {

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent.first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is no trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent.first == (u32) -1) ||
		    (extent.lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent.first + extent.count) <= extent.first)
			goto out;
		if ((extent.lower_first + extent.count) <=
		     extent.lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, &extent))
			goto out;

		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
		    (next_line != NULL))
			goto out;

		ret = insert_extent(&new_map, &extent);
		if (ret < 0)
			goto out;
		ret = -EINVAL;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate that the user is allowed to use the ids being mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	ret = -EPERM;
	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		struct uid_gid_extent *e;
		u32 lower_first;

		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			e = &new_map.extent[idx];
		else
			e = &new_map.forward[idx];

		lower_first = map_id_range_down(parent_map,
						e->lower_first,
						e->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		e->lower_first = lower_first;
	}

	/*
	 * If binary search is to be used for lookup, this clones the
	 * extent array and sorts both copies.  Do this only after the
	 * lower ids have been mapped above, so that the reverse map is
	 * built from kernel ids as well.
	 */
	ret = sort_idmaps(&new_map);
	if (ret < 0)
		goto out;

	/* Install the map */
	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
		memcpy(map->extent, new_map.extent,
		       new_map.nr_extents * sizeof(new_map.extent[0]));
	} else {
		map->forward = new_map.forward;
		map->reverse = new_map.reverse;
	}
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		kfree(new_map.forward);
		kfree(new_map.reverse);
		map->forward = NULL;
		map->reverse = NULL;
		map->nr_extents = 0;
	}

	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}

ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETUID,
			 &ns->uid_map, &ns->parent->uid_map);
}
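
/*
 * Usage sketch (illustrative): a privileged helper in the parent
 * namespace typically installs a mapping with something like
 *
 *	echo '0 100000 65536' > /proc/<pid>/uid_map
 *
 * which map_write() above accepts only as a single write, at offset 0,
 * and only once for the lifetime of the map.
 */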

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETGID,
			 &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
			      size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	/* Anyone can set any valid project id; no capability needed */
	return map_write(file, buf, size, ppos, -1,
			 &ns->projid_map, &ns->parent->projid_map);
}

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;
	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace,
	 * and the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}

int proc_setgroups_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	unsigned long userns_flags = READ_ONCE(ns->flags);

	seq_printf(seq, "%s\n",
		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
		   "allow" : "deny");
	return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is no trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}
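
/*
 * Usage sketch (illustrative): before writing gid_map without privilege,
 * a process first disables setgroups:
 *
 *	echo deny > /proc/<pid>/setgroups
 *
 * Per the checks above, "deny" fails once a gid_map has been written,
 * and "allow" can never follow an earlier "deny".
 */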

bool userns_may_setgroups(const struct user_namespace *ns)
{
	bool allowed;

	mutex_lock(&userns_state_mutex);
	/* It is not safe to use setgroups until a gid mapping in
	 * the user namespace has been established.
	 */
	allowed = ns->gid_map.nr_extents != 0;
	/* Is setgroups allowed? */
	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
	mutex_unlock(&userns_state_mutex);

	return allowed;
}

/*
 * Returns true if @child is the same namespace or a descendant of
 * @ancestor.
 */
bool in_userns(const struct user_namespace *ancestor,
	       const struct user_namespace *child)
{
	const struct user_namespace *ns;
	for (ns = child; ns->level > ancestor->level; ns = ns->parent)
		;
	return (ns == ancestor);
}

bool current_in_userns(const struct user_namespace *target_ns)
{
	return in_userns(target_ns, current_user_ns());
}

static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}
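
/*
 * Usage sketch (illustrative): userns_install() is reached via setns(2)
 * on a user namespace file descriptor, e.g.
 *
 *	int fd = open("/proc/<pid>/ns/user", O_RDONLY);
 *	setns(fd, CLONE_NEWUSER);
 *
 * The checks above reject multithreaded callers, tasks sharing their
 * fs_struct, and callers without CAP_SYS_ADMIN in the target namespace.
 */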

struct ns_common *ns_get_owner(struct ns_common *ns)
{
	struct user_namespace *my_user_ns = current_user_ns();
	struct user_namespace *owner, *p;

	/* See if the owner is in the current user namespace */
	owner = p = ns->ops->owner(ns);
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == my_user_ns)
			break;
		p = p->parent;
	}

	return &get_user_ns(owner)->ns;
}

static struct user_namespace *userns_owner(struct ns_common *ns)
{
	return to_user_ns(ns)->parent;
}

const struct proc_ns_operations userns_operations = {
	.name		= "user",
	.type		= CLONE_NEWUSER,
	.get		= userns_get,
	.put		= userns_put,
	.install	= userns_install,
	.owner		= userns_owner,
	.get_parent	= ns_get_owner,
};

static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);