xref: /openbmc/linux/kernel/user_namespace.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/export.h>
4 #include <linux/nsproxy.h>
5 #include <linux/slab.h>
6 #include <linux/sched/signal.h>
7 #include <linux/user_namespace.h>
8 #include <linux/proc_ns.h>
9 #include <linux/highuid.h>
10 #include <linux/cred.h>
11 #include <linux/securebits.h>
12 #include <linux/keyctl.h>
13 #include <linux/key-type.h>
14 #include <keys/user-type.h>
15 #include <linux/seq_file.h>
16 #include <linux/fs.h>
17 #include <linux/uaccess.h>
18 #include <linux/ctype.h>
19 #include <linux/projid.h>
20 #include <linux/fs_struct.h>
21 #include <linux/bsearch.h>
22 #include <linux/sort.h>
23 
24 static struct kmem_cache *user_ns_cachep __read_mostly;
25 static DEFINE_MUTEX(userns_state_mutex);
26 
27 static bool new_idmap_permitted(const struct file *file,
28 				struct user_namespace *ns, int cap_setid,
29 				struct uid_gid_map *map);
30 static void free_user_ns(struct work_struct *work);
31 
32 static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
33 {
34 	return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
35 }
36 
37 static void dec_user_namespaces(struct ucounts *ucounts)
38 {
39 	return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
40 }
41 
42 static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
43 {
44 	/* Start with the same capabilities as init, but they are useless for
45 	 * doing anything, as the capabilities are bound to the new user namespace.
46 	 */
47 	cred->securebits = SECUREBITS_DEFAULT;
48 	cred->cap_inheritable = CAP_EMPTY_SET;
49 	cred->cap_permitted = CAP_FULL_SET;
50 	cred->cap_effective = CAP_FULL_SET;
51 	cred->cap_ambient = CAP_EMPTY_SET;
52 	cred->cap_bset = CAP_FULL_SET;
53 #ifdef CONFIG_KEYS
54 	key_put(cred->request_key_auth);
55 	cred->request_key_auth = NULL;
56 #endif
57 	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
58 	cred->user_ns = user_ns;
59 }
60 
61 /*
62  * Create a new user namespace, deriving the creator from the user in the
63  * passed credentials, and replacing that user with the new root user for the
64  * new namespace.
65  *
66  * This is called by copy_creds(), which will finish setting the target task's
67  * credentials.
68  */
69 int create_user_ns(struct cred *new)
70 {
71 	struct user_namespace *ns, *parent_ns = new->user_ns;
72 	kuid_t owner = new->euid;
73 	kgid_t group = new->egid;
74 	struct ucounts *ucounts;
75 	int ret, i;
76 
77 	ret = -ENOSPC;
78 	if (parent_ns->level > 32)
79 		goto fail;
80 
81 	ucounts = inc_user_namespaces(parent_ns, owner);
82 	if (!ucounts)
83 		goto fail;
84 
85 	/*
86 	 * Verify that we can not violate the policy of which files
87 	 * may be accessed that is specified by the root directory,
88 	 * by verifying that the root directory is at the root of the
89 	 * mount namespace which allows all files to be accessed.
90 	 */
91 	ret = -EPERM;
92 	if (current_chrooted())
93 		goto fail_dec;
94 
95 	/* The creator needs a mapping in the parent user namespace
96 	 * or else we won't be able to reasonably tell userspace who
97 	 * created a user_namespace.
98 	 */
99 	ret = -EPERM;
100 	if (!kuid_has_mapping(parent_ns, owner) ||
101 	    !kgid_has_mapping(parent_ns, group))
102 		goto fail_dec;
103 
104 	ret = -ENOMEM;
105 	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
106 	if (!ns)
107 		goto fail_dec;
108 
109 	ret = ns_alloc_inum(&ns->ns);
110 	if (ret)
111 		goto fail_free;
112 	ns->ns.ops = &userns_operations;
113 
114 	atomic_set(&ns->count, 1);
115 	/* Leave the new->user_ns reference with the new user namespace. */
116 	ns->parent = parent_ns;
117 	ns->level = parent_ns->level + 1;
118 	ns->owner = owner;
119 	ns->group = group;
120 	INIT_WORK(&ns->work, free_user_ns);
121 	for (i = 0; i < UCOUNT_COUNTS; i++) {
122 		ns->ucount_max[i] = INT_MAX;
123 	}
124 	ns->ucounts = ucounts;
125 
126 	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
127 	mutex_lock(&userns_state_mutex);
128 	ns->flags = parent_ns->flags;
129 	mutex_unlock(&userns_state_mutex);
130 
131 #ifdef CONFIG_KEYS
132 	INIT_LIST_HEAD(&ns->keyring_name_list);
133 	init_rwsem(&ns->keyring_sem);
134 #endif
135 	ret = -ENOMEM;
136 	if (!setup_userns_sysctls(ns))
137 		goto fail_keyring;
138 
139 	set_cred_user_ns(new, ns);
140 	return 0;
141 fail_keyring:
142 #ifdef CONFIG_PERSISTENT_KEYRINGS
143 	key_put(ns->persistent_keyring_register);
144 #endif
145 	ns_free_inum(&ns->ns);
146 fail_free:
147 	kmem_cache_free(user_ns_cachep, ns);
148 fail_dec:
149 	dec_user_namespaces(ucounts);
150 fail:
151 	return ret;
152 }
153 
154 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
155 {
156 	struct cred *cred;
157 	int err = -ENOMEM;
158 
159 	if (!(unshare_flags & CLONE_NEWUSER))
160 		return 0;
161 
162 	cred = prepare_creds();
163 	if (cred) {
164 		err = create_user_ns(cred);
165 		if (err)
166 			put_cred(cred);
167 		else
168 			*new_cred = cred;
169 	}
170 
171 	return err;
172 }
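
/*
 * Illustrative userspace sketch (not part of this file): the usual path
 * into create_user_ns() is clone(2) or unshare(2) with CLONE_NEWUSER,
 * which reaches this code via copy_creds() or unshare_userns().  A
 * minimal, hypothetical example:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWUSER) != 0) {
 *			perror("unshare(CLONE_NEWUSER)");
 *			return 1;
 *		}
 *		// Until a uid_map is written, ids are unmapped and the
 *		// caller appears as the overflow uid (65534 by default).
 *		printf("euid in new userns: %d\n", (int)geteuid());
 *		return 0;
 *	}
 */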
173 
174 static void free_user_ns(struct work_struct *work)
175 {
176 	struct user_namespace *parent, *ns =
177 		container_of(work, struct user_namespace, work);
178 
179 	do {
180 		struct ucounts *ucounts = ns->ucounts;
181 		parent = ns->parent;
182 		if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
183 			kfree(ns->gid_map.forward);
184 			kfree(ns->gid_map.reverse);
185 		}
186 		if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
187 			kfree(ns->uid_map.forward);
188 			kfree(ns->uid_map.reverse);
189 		}
190 		if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
191 			kfree(ns->projid_map.forward);
192 			kfree(ns->projid_map.reverse);
193 		}
194 		retire_userns_sysctls(ns);
195 		key_free_user_ns(ns);
196 		ns_free_inum(&ns->ns);
197 		kmem_cache_free(user_ns_cachep, ns);
198 		dec_user_namespaces(ucounts);
199 		ns = parent;
200 	} while (atomic_dec_and_test(&parent->count));
201 }
202 
203 void __put_user_ns(struct user_namespace *ns)
204 {
205 	schedule_work(&ns->work);
206 }
207 EXPORT_SYMBOL(__put_user_ns);
208 
209 /**
210  * idmap_key struct holds the information necessary to find an idmapping in a
211  * sorted idmap array. It is passed to cmp_map_id() as its first argument.
212  */
213 struct idmap_key {
214 	bool map_up; /* true  -> id from kid; false -> kid from id */
215 	u32 id; /* id to find */
216 	u32 count; /* == 0 unless used with map_id_range_down() */
217 };
218 
219 /**
220  * cmp_map_id - Function to be passed to bsearch() to find the requested
221  * idmapping. Expects struct idmap_key to be passed via @k.
222  */
223 static int cmp_map_id(const void *k, const void *e)
224 {
225 	u32 first, last, id2;
226 	const struct idmap_key *key = k;
227 	const struct uid_gid_extent *el = e;
228 
229 	id2 = key->id + key->count - 1;
230 
231 	/* handle map_id_{down,up}() */
232 	if (key->map_up)
233 		first = el->lower_first;
234 	else
235 		first = el->first;
236 
237 	last = first + el->count - 1;
238 
239 	if (key->id >= first && key->id <= last &&
240 	    (id2 >= first && id2 <= last))
241 		return 0;
242 
243 	if (key->id < first || id2 < first)
244 		return -1;
245 
246 	return 1;
247 }
248 
249 /**
250  * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
251  * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
252  */
253 static struct uid_gid_extent *
254 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
255 {
256 	struct idmap_key key;
257 
258 	key.map_up = false;
259 	key.count = count;
260 	key.id = id;
261 
262 	return bsearch(&key, map->forward, extents,
263 		       sizeof(struct uid_gid_extent), cmp_map_id);
264 }
265 
266 /**
267  * map_id_range_down_base - Find idmap via linear search in the static extent array.
268  * Can only be called if the number of mappings is less than or equal to
269  * UID_GID_MAP_MAX_BASE_EXTENTS.
270  */
271 static struct uid_gid_extent *
272 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
273 {
274 	unsigned idx;
275 	u32 first, last, id2;
276 
277 	id2 = id + count - 1;
278 
279 	/* Find the matching extent */
280 	for (idx = 0; idx < extents; idx++) {
281 		first = map->extent[idx].first;
282 		last = first + map->extent[idx].count - 1;
283 		if (id >= first && id <= last &&
284 		    (id2 >= first && id2 <= last))
285 			return &map->extent[idx];
286 	}
287 	return NULL;
288 }
289 
290 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
291 {
292 	struct uid_gid_extent *extent;
293 	unsigned extents = map->nr_extents;
294 	smp_rmb();
295 
296 	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
297 		extent = map_id_range_down_base(extents, map, id, count);
298 	else
299 		extent = map_id_range_down_max(extents, map, id, count);
300 
301 	/* Map the id or note failure */
302 	if (extent)
303 		id = (id - extent->first) + extent->lower_first;
304 	else
305 		id = (u32) -1;
306 
307 	return id;
308 }
309 
310 static u32 map_id_down(struct uid_gid_map *map, u32 id)
311 {
312 	return map_id_range_down(map, id, 1);
313 }
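
/*
 * Worked example (illustrative): with a single extent
 * { .first = 0, .lower_first = 100000, .count = 65536 }, map_id_down()
 * translates a namespace-local id into the parent/kernel id range:
 *
 *	map_id_down(map, 1000)  == (1000 - 0) + 100000 == 101000
 *	map_id_down(map, 70000) == (u32) -1	(id not covered by any extent)
 */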
314 
315 /**
316  * map_id_up_base - Find idmap via linear search in the static extent array.
317  * Can only be called if the number of mappings is less than or equal to
318  * UID_GID_MAP_MAX_BASE_EXTENTS.
319  */
320 static struct uid_gid_extent *
321 map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
322 {
323 	unsigned idx;
324 	u32 first, last;
325 
326 	/* Find the matching extent */
327 	for (idx = 0; idx < extents; idx++) {
328 		first = map->extent[idx].lower_first;
329 		last = first + map->extent[idx].count - 1;
330 		if (id >= first && id <= last)
331 			return &map->extent[idx];
332 	}
333 	return NULL;
334 }
335 
336 /**
337  * map_id_up_max - Find idmap via binary search in ordered idmap array.
338  * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
339  */
340 static struct uid_gid_extent *
341 map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
342 {
343 	struct idmap_key key;
344 
345 	key.map_up = true;
346 	key.count = 1;
347 	key.id = id;
348 
349 	return bsearch(&key, map->reverse, extents,
350 		       sizeof(struct uid_gid_extent), cmp_map_id);
351 }
352 
353 static u32 map_id_up(struct uid_gid_map *map, u32 id)
354 {
355 	struct uid_gid_extent *extent;
356 	unsigned extents = map->nr_extents;
357 	smp_rmb();
358 
359 	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
360 		extent = map_id_up_base(extents, map, id);
361 	else
362 		extent = map_id_up_max(extents, map, id);
363 
364 	/* Map the id or note failure */
365 	if (extent)
366 		id = (id - extent->lower_first) + extent->first;
367 	else
368 		id = (u32) -1;
369 
370 	return id;
371 }
372 
373 /**
374  *	make_kuid - Map a user-namespace uid pair into a kuid.
375  *	@ns:  User namespace that the uid is in
376  *	@uid: User identifier
377  *
378  *	Maps a user-namespace uid pair into a kernel internal kuid,
379  *	and returns that kuid.
380  *
381  *	When there is no mapping defined for the user-namespace uid
382  *	pair INVALID_UID is returned.  Callers are expected to test
383  *	for and handle INVALID_UID being returned.  INVALID_UID
384  *	may be tested for using uid_valid().
385  */
386 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
387 {
388 	/* Map the uid to a global kernel uid */
389 	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
390 }
391 EXPORT_SYMBOL(make_kuid);
392 
393 /**
394  *	from_kuid - Create a uid from a kuid user-namespace pair.
395  *	@targ: The user namespace we want a uid in.
396  *	@kuid: The kernel internal uid to start with.
397  *
398  *	Map @kuid into the user-namespace specified by @targ and
399  *	return the resulting uid.
400  *
401  *	There is always a mapping into the initial user_namespace.
402  *
403  *	If @kuid has no mapping in @targ (uid_t)-1 is returned.
404  */
405 uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
406 {
407 	/* Map the uid from a global kernel uid */
408 	return map_id_up(&targ->uid_map, __kuid_val(kuid));
409 }
410 EXPORT_SYMBOL(from_kuid);
411 
412 /**
413  *	from_kuid_munged - Create a uid from a kuid user-namespace pair.
414  *	@targ: The user namespace we want a uid in.
415  *	@kuid: The kernel internal uid to start with.
416  *
417  *	Map @kuid into the user-namespace specified by @targ and
418  *	return the resulting uid.
419  *
420  *	There is always a mapping into the initial user_namespace.
421  *
422  *	Unlike from_kuid, from_kuid_munged never fails and always
423  *	returns a valid uid.  This makes from_kuid_munged appropriate
424  *	for use in syscalls like stat and getuid where failing the
425  *	system call and failing to provide a valid uid are not
426  *	options.
427  *
428  *	If @kuid has no mapping in @targ overflowuid is returned.
429  */
430 uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
431 {
432 	uid_t uid;
433 	uid = from_kuid(targ, kuid);
434 
435 	if (uid == (uid_t) -1)
436 		uid = overflowuid;
437 	return uid;
438 }
439 EXPORT_SYMBOL(from_kuid_munged);
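
/*
 * Illustrative comparison (assumed example map, not from this file): for a
 * namespace whose uid_map is "0 100000 1000":
 *
 *	from_kuid(ns, KUIDT_INIT(100005))   == 5
 *	from_kuid(ns, KUIDT_INIT(5))        == (uid_t) -1	(no mapping)
 *	from_kuid_munged(ns, KUIDT_INIT(5)) == overflowuid	(65534 by default)
 */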
440 
441 /**
442  *	make_kgid - Map a user-namespace gid pair into a kgid.
443  *	@ns:  User namespace that the gid is in
444  *	@gid: group identifier
445  *
446  *	Maps a user-namespace gid pair into a kernel internal kgid,
447  *	and returns that kgid.
448  *
449  *	When there is no mapping defined for the user-namespace gid
450  *	pair INVALID_GID is returned.  Callers are expected to test
451  *	for and handle INVALID_GID being returned.  INVALID_GID may be
452  *	tested for using gid_valid().
453  */
454 kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
455 {
456 	/* Map the gid to a global kernel gid */
457 	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
458 }
459 EXPORT_SYMBOL(make_kgid);
460 
461 /**
462  *	from_kgid - Create a gid from a kgid user-namespace pair.
463  *	@targ: The user namespace we want a gid in.
464  *	@kgid: The kernel internal gid to start with.
465  *
466  *	Map @kgid into the user-namespace specified by @targ and
467  *	return the resulting gid.
468  *
469  *	There is always a mapping into the initial user_namespace.
470  *
471  *	If @kgid has no mapping in @targ (gid_t)-1 is returned.
472  */
473 gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
474 {
475 	/* Map the gid from a global kernel gid */
476 	return map_id_up(&targ->gid_map, __kgid_val(kgid));
477 }
478 EXPORT_SYMBOL(from_kgid);
479 
480 /**
481  *	from_kgid_munged - Create a gid from a kgid user-namespace pair.
482  *	@targ: The user namespace we want a gid in.
483  *	@kgid: The kernel internal gid to start with.
484  *
485  *	Map @kgid into the user-namespace specified by @targ and
486  *	return the resulting gid.
487  *
488  *	There is always a mapping into the initial user_namespace.
489  *
490  *	Unlike from_kgid, from_kgid_munged never fails and always
491  *	returns a valid gid.  This makes from_kgid_munged appropriate
492  *	for use in syscalls like stat and getgid where failing the
493  *	system call and failing to provide a valid gid are not options.
494  *
495  *	If @kgid has no mapping in @targ overflowgid is returned.
496  */
497 gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
498 {
499 	gid_t gid;
500 	gid = from_kgid(targ, kgid);
501 
502 	if (gid == (gid_t) -1)
503 		gid = overflowgid;
504 	return gid;
505 }
506 EXPORT_SYMBOL(from_kgid_munged);
507 
508 /**
509  *	make_kprojid - Map a user-namespace projid pair into a kprojid.
510  *	@ns:  User namespace that the projid is in
511  *	@projid: Project identifier
512  *
513  *	Maps a user-namespace projid pair into a kernel internal kprojid,
514  *	and returns that kprojid.
515  *
516  *	When there is no mapping defined for the user-namespace projid
517  *	pair INVALID_PROJID is returned.  Callers are expected to test
518  *	for and handle INVALID_PROJID being returned.  INVALID_PROJID
519  *	may be tested for using projid_valid().
520  */
521 kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
522 {
523 	/* Map the projid to a global kernel projid */
524 	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
525 }
526 EXPORT_SYMBOL(make_kprojid);
527 
528 /**
529  *	from_kprojid - Create a projid from a kprojid user-namespace pair.
530  *	@targ: The user namespace we want a projid in.
531  *	@kprojid: The kernel internal project identifier to start with.
532  *
533  *	Map @kprojid into the user-namespace specified by @targ and
534  *	return the resulting projid.
535  *
536  *	There is always a mapping into the initial user_namespace.
537  *
538  *	If @kprojid has no mapping in @targ (projid_t)-1 is returned.
539  */
540 projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
541 {
542 	/* Map the projid from a global kernel projid */
543 	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
544 }
545 EXPORT_SYMBOL(from_kprojid);
546 
547 /**
548  *	from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
549  *	@targ: The user namespace we want a projid in.
550  *	@kprojid: The kernel internal projid to start with.
551  *
552  *	Map @kprojid into the user-namespace specified by @targ and
553  *	return the resulting projid.
554  *
555  *	There is always a mapping into the initial user_namespace.
556  *
557  *	Unlike from_kprojid, from_kprojid_munged never fails and always
558  *	returns a valid projid.  This makes from_kprojid_munged
559  *	appropriate for use in syscalls like stat where failing the
560  *	system call and failing to provide a valid projid are not
561  *	options.
562  *
563  *	If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
564  */
565 projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
566 {
567 	projid_t projid;
568 	projid = from_kprojid(targ, kprojid);
569 
570 	if (projid == (projid_t) -1)
571 		projid = OVERFLOW_PROJID;
572 	return projid;
573 }
574 EXPORT_SYMBOL(from_kprojid_munged);
575 
576 
577 static int uid_m_show(struct seq_file *seq, void *v)
578 {
579 	struct user_namespace *ns = seq->private;
580 	struct uid_gid_extent *extent = v;
581 	struct user_namespace *lower_ns;
582 	uid_t lower;
583 
584 	lower_ns = seq_user_ns(seq);
585 	if ((lower_ns == ns) && lower_ns->parent)
586 		lower_ns = lower_ns->parent;
587 
588 	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
589 
590 	seq_printf(seq, "%10u %10u %10u\n",
591 		extent->first,
592 		lower,
593 		extent->count);
594 
595 	return 0;
596 }
597 
598 static int gid_m_show(struct seq_file *seq, void *v)
599 {
600 	struct user_namespace *ns = seq->private;
601 	struct uid_gid_extent *extent = v;
602 	struct user_namespace *lower_ns;
603 	gid_t lower;
604 
605 	lower_ns = seq_user_ns(seq);
606 	if ((lower_ns == ns) && lower_ns->parent)
607 		lower_ns = lower_ns->parent;
608 
609 	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
610 
611 	seq_printf(seq, "%10u %10u %10u\n",
612 		extent->first,
613 		lower,
614 		extent->count);
615 
616 	return 0;
617 }
618 
619 static int projid_m_show(struct seq_file *seq, void *v)
620 {
621 	struct user_namespace *ns = seq->private;
622 	struct uid_gid_extent *extent = v;
623 	struct user_namespace *lower_ns;
624 	projid_t lower;
625 
626 	lower_ns = seq_user_ns(seq);
627 	if ((lower_ns == ns) && lower_ns->parent)
628 		lower_ns = lower_ns->parent;
629 
630 	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
631 
632 	seq_printf(seq, "%10u %10u %10u\n",
633 		extent->first,
634 		lower,
635 		extent->count);
636 
637 	return 0;
638 }
639 
640 static void *m_start(struct seq_file *seq, loff_t *ppos,
641 		     struct uid_gid_map *map)
642 {
643 	loff_t pos = *ppos;
644 	unsigned extents = map->nr_extents;
645 	smp_rmb();
646 
647 	if (pos >= extents)
648 		return NULL;
649 
650 	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
651 		return &map->extent[pos];
652 
653 	return &map->forward[pos];
654 }
655 
656 static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
657 {
658 	struct user_namespace *ns = seq->private;
659 
660 	return m_start(seq, ppos, &ns->uid_map);
661 }
662 
663 static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
664 {
665 	struct user_namespace *ns = seq->private;
666 
667 	return m_start(seq, ppos, &ns->gid_map);
668 }
669 
670 static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
671 {
672 	struct user_namespace *ns = seq->private;
673 
674 	return m_start(seq, ppos, &ns->projid_map);
675 }
676 
677 static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
678 {
679 	(*pos)++;
680 	return seq->op->start(seq, pos);
681 }
682 
683 static void m_stop(struct seq_file *seq, void *v)
684 {
685 	return;
686 }
687 
688 const struct seq_operations proc_uid_seq_operations = {
689 	.start = uid_m_start,
690 	.stop = m_stop,
691 	.next = m_next,
692 	.show = uid_m_show,
693 };
694 
695 const struct seq_operations proc_gid_seq_operations = {
696 	.start = gid_m_start,
697 	.stop = m_stop,
698 	.next = m_next,
699 	.show = gid_m_show,
700 };
701 
702 const struct seq_operations proc_projid_seq_operations = {
703 	.start = projid_m_start,
704 	.stop = m_stop,
705 	.next = m_next,
706 	.show = projid_m_show,
707 };
708 
709 static bool mappings_overlap(struct uid_gid_map *new_map,
710 			     struct uid_gid_extent *extent)
711 {
712 	u32 upper_first, lower_first, upper_last, lower_last;
713 	unsigned idx;
714 
715 	upper_first = extent->first;
716 	lower_first = extent->lower_first;
717 	upper_last = upper_first + extent->count - 1;
718 	lower_last = lower_first + extent->count - 1;
719 
720 	for (idx = 0; idx < new_map->nr_extents; idx++) {
721 		u32 prev_upper_first, prev_lower_first;
722 		u32 prev_upper_last, prev_lower_last;
723 		struct uid_gid_extent *prev;
724 
725 		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
726 			prev = &new_map->extent[idx];
727 		else
728 			prev = &new_map->forward[idx];
729 
730 		prev_upper_first = prev->first;
731 		prev_lower_first = prev->lower_first;
732 		prev_upper_last = prev_upper_first + prev->count - 1;
733 		prev_lower_last = prev_lower_first + prev->count - 1;
734 
735 		/* Does the upper range intersect a previous extent? */
736 		if ((prev_upper_first <= upper_last) &&
737 		    (prev_upper_last >= upper_first))
738 			return true;
739 
740 		/* Does the lower range intersect a previous extent? */
741 		if ((prev_lower_first <= lower_last) &&
742 		    (prev_lower_last >= lower_first))
743 			return true;
744 	}
745 	return false;
746 }
747 
748 /**
749  * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
750  * Takes care to allocate a 4K block of memory if the number of mappings exceeds
751  * UID_GID_MAP_MAX_BASE_EXTENTS.
752  */
753 static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
754 {
755 	struct uid_gid_extent *dest;
756 
757 	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
758 		struct uid_gid_extent *forward;
759 
760 		/* Allocate memory for 340 mappings. */
761 		forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
762 					sizeof(struct uid_gid_extent),
763 					GFP_KERNEL);
764 		if (!forward)
765 			return -ENOMEM;
766 
767 		/* Copy over memory. Only set up memory for the forward pointer.
768 		 * Defer the memory setup for the reverse pointer.
769 		 */
770 		memcpy(forward, map->extent,
771 		       map->nr_extents * sizeof(map->extent[0]));
772 
773 		map->forward = forward;
774 		map->reverse = NULL;
775 	}
776 
777 	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
778 		dest = &map->extent[map->nr_extents];
779 	else
780 		dest = &map->forward[map->nr_extents];
781 
782 	*dest = *extent;
783 	map->nr_extents++;
784 	return 0;
785 }
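
/*
 * Storage note (assuming the usual limits UID_GID_MAP_MAX_BASE_EXTENTS == 5
 * and UID_GID_MAP_MAX_EXTENTS == 340): the first five extents live in the
 * fixed map->extent[] array; inserting a sixth copies them into the
 * kmalloc'ed map->forward array, which can hold up to 340 entries, while
 * map->reverse stays NULL until sort_idmaps() builds it.
 */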
786 
787 /* cmp function to sort() forward mappings */
788 static int cmp_extents_forward(const void *a, const void *b)
789 {
790 	const struct uid_gid_extent *e1 = a;
791 	const struct uid_gid_extent *e2 = b;
792 
793 	if (e1->first < e2->first)
794 		return -1;
795 
796 	if (e1->first > e2->first)
797 		return 1;
798 
799 	return 0;
800 }
801 
802 /* cmp function to sort() reverse mappings */
803 static int cmp_extents_reverse(const void *a, const void *b)
804 {
805 	const struct uid_gid_extent *e1 = a;
806 	const struct uid_gid_extent *e2 = b;
807 
808 	if (e1->lower_first < e2->lower_first)
809 		return -1;
810 
811 	if (e1->lower_first > e2->lower_first)
812 		return 1;
813 
814 	return 0;
815 }
816 
817 /**
818  * sort_idmaps - Sorts an array of idmap entries.
819  * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
820  */
821 static int sort_idmaps(struct uid_gid_map *map)
822 {
823 	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
824 		return 0;
825 
826 	/* Sort forward array. */
827 	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
828 	     cmp_extents_forward, NULL);
829 
830 	/* Only copy the memory from forward we actually need. */
831 	map->reverse = kmemdup(map->forward,
832 			       map->nr_extents * sizeof(struct uid_gid_extent),
833 			       GFP_KERNEL);
834 	if (!map->reverse)
835 		return -ENOMEM;
836 
837 	/* Sort reverse array. */
838 	sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
839 	     cmp_extents_reverse, NULL);
840 
841 	return 0;
842 }
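
/*
 * Illustrative example: for the two extents
 *
 *	{ .first = 1000, .lower_first = 0,    .count = 1  }
 *	{ .first = 0,    .lower_first = 5000, .count = 10 }
 *
 * the forward copy ends up sorted by .first (0 before 1000) so that
 * map_id_range_down_max() can bsearch by namespace id, while the reverse
 * copy is sorted by .lower_first (0 before 5000) for map_id_up_max().
 */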
843 
844 static ssize_t map_write(struct file *file, const char __user *buf,
845 			 size_t count, loff_t *ppos,
846 			 int cap_setid,
847 			 struct uid_gid_map *map,
848 			 struct uid_gid_map *parent_map)
849 {
850 	struct seq_file *seq = file->private_data;
851 	struct user_namespace *ns = seq->private;
852 	struct uid_gid_map new_map;
853 	unsigned idx;
854 	struct uid_gid_extent extent;
855 	char *kbuf = NULL, *pos, *next_line;
856 	ssize_t ret;
857 
858 	/* Only allow < page size writes at the beginning of the file */
859 	if ((*ppos != 0) || (count >= PAGE_SIZE))
860 		return -EINVAL;
861 
862 	/* Slurp in the user data */
863 	kbuf = memdup_user_nul(buf, count);
864 	if (IS_ERR(kbuf))
865 		return PTR_ERR(kbuf);
866 
867 	/*
868 	 * The userns_state_mutex serializes all writes to any given map.
869 	 *
870 	 * Any map is only ever written once.
871 	 *
872 	 * An id map fits within 1 cache line on most architectures.
873 	 *
874 	 * On read nothing needs to be done unless you are on an
875 	 * architecture with a crazy cache coherency model like alpha.
876 	 *
877 	 * There is a one time data dependency between reading the
878 	 * count of the extents and the values of the extents.  The
879 	 * desired behavior is to see the values of the extents that
880 	 * were written before the count of the extents.
881 	 *
882 	 * To achieve this, smp_wmb() is used to guarantee the write
883 	 * order and smp_rmb() is used to guarantee that we don't have
884 	 * crazy architectures returning stale data.
885 	 */
886 	mutex_lock(&userns_state_mutex);
887 
888 	memset(&new_map, 0, sizeof(struct uid_gid_map));
889 
890 	ret = -EPERM;
891 	/* Only allow one successful write to the map */
892 	if (map->nr_extents != 0)
893 		goto out;
894 
895 	/*
896 	 * Adjusting namespace settings requires capabilities on the target.
897 	 */
898 	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
899 		goto out;
900 
901 	/* Parse the user data */
902 	ret = -EINVAL;
903 	pos = kbuf;
904 	for (; pos; pos = next_line) {
905 
906 		/* Find the end of line and ensure I don't look past it */
907 		next_line = strchr(pos, '\n');
908 		if (next_line) {
909 			*next_line = '\0';
910 			next_line++;
911 			if (*next_line == '\0')
912 				next_line = NULL;
913 		}
914 
915 		pos = skip_spaces(pos);
916 		extent.first = simple_strtoul(pos, &pos, 10);
917 		if (!isspace(*pos))
918 			goto out;
919 
920 		pos = skip_spaces(pos);
921 		extent.lower_first = simple_strtoul(pos, &pos, 10);
922 		if (!isspace(*pos))
923 			goto out;
924 
925 		pos = skip_spaces(pos);
926 		extent.count = simple_strtoul(pos, &pos, 10);
927 		if (*pos && !isspace(*pos))
928 			goto out;
929 
930 		/* Verify there is no trailing junk on the line */
931 		pos = skip_spaces(pos);
932 		if (*pos != '\0')
933 			goto out;
934 
935 		/* Verify we have been given valid starting values */
936 		if ((extent.first == (u32) -1) ||
937 		    (extent.lower_first == (u32) -1))
938 			goto out;
939 
940 		/* Verify count is not zero and does not cause the
941 		 * extent to wrap
942 		 */
943 		if ((extent.first + extent.count) <= extent.first)
944 			goto out;
945 		if ((extent.lower_first + extent.count) <=
946 		     extent.lower_first)
947 			goto out;
948 
949 		/* Do the ranges in extent overlap any previous extents? */
950 		if (mappings_overlap(&new_map, &extent))
951 			goto out;
952 
953 		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
954 		    (next_line != NULL))
955 			goto out;
956 
957 		ret = insert_extent(&new_map, &extent);
958 		if (ret < 0)
959 			goto out;
960 		ret = -EINVAL;
961 	}
962 	/* Be very certain the new map actually exists */
963 	if (new_map.nr_extents == 0)
964 		goto out;
965 
966 	ret = -EPERM;
967 	/* Validate that the user is allowed to use the ids being mapped to. */
968 	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
969 		goto out;
970 
971 	ret = -EPERM;
972 	/* Map the lower ids from the parent user namespace to the
973 	 * kernel global id space.
974 	 */
975 	for (idx = 0; idx < new_map.nr_extents; idx++) {
976 		struct uid_gid_extent *e;
977 		u32 lower_first;
978 
979 		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
980 			e = &new_map.extent[idx];
981 		else
982 			e = &new_map.forward[idx];
983 
984 		lower_first = map_id_range_down(parent_map,
985 						e->lower_first,
986 						e->count);
987 
988 		/* Fail if we can not map the specified extent to
989 		 * the kernel global id space.
990 		 */
991 		if (lower_first == (u32) -1)
992 			goto out;
993 
994 		e->lower_first = lower_first;
995 	}
996 
997 	/*
998 	 * If we want to use binary search for lookup, this clones the extent
999 	 * array and sorts both copies.
1000 	 */
1001 	ret = sort_idmaps(&new_map);
1002 	if (ret < 0)
1003 		goto out;
1004 
1005 	/* Install the map */
1006 	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
1007 		memcpy(map->extent, new_map.extent,
1008 		       new_map.nr_extents * sizeof(new_map.extent[0]));
1009 	} else {
1010 		map->forward = new_map.forward;
1011 		map->reverse = new_map.reverse;
1012 	}
1013 	smp_wmb();
1014 	map->nr_extents = new_map.nr_extents;
1015 
1016 	*ppos = count;
1017 	ret = count;
1018 out:
1019 	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
1020 		kfree(new_map.forward);
1021 		kfree(new_map.reverse);
1022 		map->forward = NULL;
1023 		map->reverse = NULL;
1024 		map->nr_extents = 0;
1025 	}
1026 
1027 	mutex_unlock(&userns_state_mutex);
1028 	kfree(kbuf);
1029 	return ret;
1030 }
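
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file): each line written to /proc/<pid>/uid_map, gid_map or projid_map
 * has the form "<first> <lower_first> <count>"; e.g. "0 100000 65536"
 * maps ids 0-65535 in the namespace onto 100000-165535 in the parent.
 * The whole map must arrive in one write() of less than a page, and only
 * one successful write per map is allowed:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int write_idmap(pid_t pid, const char *file, const char *map)
 *	{
 *		char path[64];
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/%s", (int)pid, file);
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, map, strlen(map)) != (ssize_t)strlen(map)) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 *	// Example: write_idmap(child_pid, "uid_map", "0 100000 65536\n");
 */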
1031 
1032 ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
1033 			   size_t size, loff_t *ppos)
1034 {
1035 	struct seq_file *seq = file->private_data;
1036 	struct user_namespace *ns = seq->private;
1037 	struct user_namespace *seq_ns = seq_user_ns(seq);
1038 
1039 	if (!ns->parent)
1040 		return -EPERM;
1041 
1042 	if ((seq_ns != ns) && (seq_ns != ns->parent))
1043 		return -EPERM;
1044 
1045 	return map_write(file, buf, size, ppos, CAP_SETUID,
1046 			 &ns->uid_map, &ns->parent->uid_map);
1047 }
1048 
1049 ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
1050 			   size_t size, loff_t *ppos)
1051 {
1052 	struct seq_file *seq = file->private_data;
1053 	struct user_namespace *ns = seq->private;
1054 	struct user_namespace *seq_ns = seq_user_ns(seq);
1055 
1056 	if (!ns->parent)
1057 		return -EPERM;
1058 
1059 	if ((seq_ns != ns) && (seq_ns != ns->parent))
1060 		return -EPERM;
1061 
1062 	return map_write(file, buf, size, ppos, CAP_SETGID,
1063 			 &ns->gid_map, &ns->parent->gid_map);
1064 }
1065 
1066 ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
1067 			      size_t size, loff_t *ppos)
1068 {
1069 	struct seq_file *seq = file->private_data;
1070 	struct user_namespace *ns = seq->private;
1071 	struct user_namespace *seq_ns = seq_user_ns(seq);
1072 
1073 	if (!ns->parent)
1074 		return -EPERM;
1075 
1076 	if ((seq_ns != ns) && (seq_ns != ns->parent))
1077 		return -EPERM;
1078 
1079 	/* Anyone can set any valid project id; no capability needed */
1080 	return map_write(file, buf, size, ppos, -1,
1081 			 &ns->projid_map, &ns->parent->projid_map);
1082 }
1083 
1084 static bool new_idmap_permitted(const struct file *file,
1085 				struct user_namespace *ns, int cap_setid,
1086 				struct uid_gid_map *new_map)
1087 {
1088 	const struct cred *cred = file->f_cred;
1089 	/* Don't allow mappings that would allow anything that wouldn't
1090 	 * be allowed without the establishment of unprivileged mappings.
1091 	 */
1092 	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
1093 	    uid_eq(ns->owner, cred->euid)) {
1094 		u32 id = new_map->extent[0].lower_first;
1095 		if (cap_setid == CAP_SETUID) {
1096 			kuid_t uid = make_kuid(ns->parent, id);
1097 			if (uid_eq(uid, cred->euid))
1098 				return true;
1099 		} else if (cap_setid == CAP_SETGID) {
1100 			kgid_t gid = make_kgid(ns->parent, id);
1101 			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
1102 			    gid_eq(gid, cred->egid))
1103 				return true;
1104 		}
1105 	}
1106 
1107 	/* Allow anyone to set a mapping that doesn't require privilege */
1108 	if (!cap_valid(cap_setid))
1109 		return true;
1110 
1111 	/* Allow the specified ids if we have the appropriate capability
1112 	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
1113 	 * And the opener of the id file also had the appropriate capability.
1114 	 */
1115 	if (ns_capable(ns->parent, cap_setid) &&
1116 	    file_ns_capable(file, ns->parent, cap_setid))
1117 		return true;
1118 
1119 	return false;
1120 }
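
/*
 * Illustrative example: an unprivileged process whose euid maps to 1000 in
 * the parent namespace and which owns the child namespace may write the
 * single-line map "0 1000 1" to the child's uid_map without holding
 * CAP_SETUID over the parent, because the only lower id is its own euid.
 * Any wider or different range is rejected here unless both the writer and
 * the opener of the map file have the relevant capability over the parent.
 */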
1121 
1122 int proc_setgroups_show(struct seq_file *seq, void *v)
1123 {
1124 	struct user_namespace *ns = seq->private;
1125 	unsigned long userns_flags = READ_ONCE(ns->flags);
1126 
1127 	seq_printf(seq, "%s\n",
1128 		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
1129 		   "allow" : "deny");
1130 	return 0;
1131 }
1132 
1133 ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
1134 			     size_t count, loff_t *ppos)
1135 {
1136 	struct seq_file *seq = file->private_data;
1137 	struct user_namespace *ns = seq->private;
1138 	char kbuf[8], *pos;
1139 	bool setgroups_allowed;
1140 	ssize_t ret;
1141 
1142 	/* Only allow a very narrow range of strings to be written */
1143 	ret = -EINVAL;
1144 	if ((*ppos != 0) || (count >= sizeof(kbuf)))
1145 		goto out;
1146 
1147 	/* What was written? */
1148 	ret = -EFAULT;
1149 	if (copy_from_user(kbuf, buf, count))
1150 		goto out;
1151 	kbuf[count] = '\0';
1152 	pos = kbuf;
1153 
1154 	/* What is being requested? */
1155 	ret = -EINVAL;
1156 	if (strncmp(pos, "allow", 5) == 0) {
1157 		pos += 5;
1158 		setgroups_allowed = true;
1159 	}
1160 	else if (strncmp(pos, "deny", 4) == 0) {
1161 		pos += 4;
1162 		setgroups_allowed = false;
1163 	}
1164 	else
1165 		goto out;
1166 
1167 	/* Verify there is no trailing junk on the line */
1168 	pos = skip_spaces(pos);
1169 	if (*pos != '\0')
1170 		goto out;
1171 
1172 	ret = -EPERM;
1173 	mutex_lock(&userns_state_mutex);
1174 	if (setgroups_allowed) {
1175 		/* Enabling setgroups after setgroups has been disabled
1176 		 * is not allowed.
1177 		 */
1178 		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
1179 			goto out_unlock;
1180 	} else {
1181 		/* Permanently disabling setgroups after setgroups has
1182 		 * been enabled by writing the gid_map is not allowed.
1183 		 */
1184 		if (ns->gid_map.nr_extents != 0)
1185 			goto out_unlock;
1186 		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
1187 	}
1188 	mutex_unlock(&userns_state_mutex);
1189 
1190 	/* Report a successful write */
1191 	*ppos = count;
1192 	ret = count;
1193 out:
1194 	return ret;
1195 out_unlock:
1196 	mutex_unlock(&userns_state_mutex);
1197 	goto out;
1198 }
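
/*
 * Illustrative userspace sequence (not part of this file): an unprivileged
 * process that wants to map its own gid must disable setgroups() first,
 * because new_idmap_permitted() only accepts the unprivileged single-gid
 * mapping once USERNS_SETGROUPS_ALLOWED has been cleared:
 *
 *	echo deny       > /proc/<pid>/setgroups
 *	echo "0 1000 1" > /proc/<pid>/gid_map
 *
 * Writing "deny" is permanent for the namespace, and it must happen before
 * gid_map is written, since disabling setgroups once a gid mapping exists
 * is refused above.
 */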
1199 
1200 bool userns_may_setgroups(const struct user_namespace *ns)
1201 {
1202 	bool allowed;
1203 
1204 	mutex_lock(&userns_state_mutex);
1205 	/* It is not safe to use setgroups until a gid mapping in
1206 	 * the user namespace has been established.
1207 	 */
1208 	allowed = ns->gid_map.nr_extents != 0;
1209 	/* Is setgroups allowed? */
1210 	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
1211 	mutex_unlock(&userns_state_mutex);
1212 
1213 	return allowed;
1214 }
1215 
1216 /*
1217  * Returns true if @child is the same namespace or a descendant of
1218  * @ancestor.
1219  */
1220 bool in_userns(const struct user_namespace *ancestor,
1221 	       const struct user_namespace *child)
1222 {
1223 	const struct user_namespace *ns;
1224 	for (ns = child; ns->level > ancestor->level; ns = ns->parent)
1225 		;
1226 	return (ns == ancestor);
1227 }
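
/*
 * Worked example: with init_user_ns at level 0, a child at level 1 and a
 * grandchild at level 2, in_userns(child, grandchild) walks the
 * grandchild's ->parent chain until the levels match and returns true,
 * while in_userns(grandchild, child) never walks (the child's level is
 * already below the grandchild's) and returns false.
 */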
1228 
1229 bool current_in_userns(const struct user_namespace *target_ns)
1230 {
1231 	return in_userns(target_ns, current_user_ns());
1232 }
1233 EXPORT_SYMBOL(current_in_userns);
1234 
1235 static inline struct user_namespace *to_user_ns(struct ns_common *ns)
1236 {
1237 	return container_of(ns, struct user_namespace, ns);
1238 }
1239 
1240 static struct ns_common *userns_get(struct task_struct *task)
1241 {
1242 	struct user_namespace *user_ns;
1243 
1244 	rcu_read_lock();
1245 	user_ns = get_user_ns(__task_cred(task)->user_ns);
1246 	rcu_read_unlock();
1247 
1248 	return user_ns ? &user_ns->ns : NULL;
1249 }
1250 
1251 static void userns_put(struct ns_common *ns)
1252 {
1253 	put_user_ns(to_user_ns(ns));
1254 }
1255 
1256 static int userns_install(struct nsset *nsset, struct ns_common *ns)
1257 {
1258 	struct user_namespace *user_ns = to_user_ns(ns);
1259 	struct cred *cred;
1260 
1261 	/* Don't allow gaining capabilities by reentering
1262 	 * the same user namespace.
1263 	 */
1264 	if (user_ns == current_user_ns())
1265 		return -EINVAL;
1266 
1267 	/* Tasks that share a thread group must share a user namespace */
1268 	if (!thread_group_empty(current))
1269 		return -EINVAL;
1270 
1271 	if (current->fs->users != 1)
1272 		return -EINVAL;
1273 
1274 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
1275 		return -EPERM;
1276 
1277 	cred = nsset_cred(nsset);
1278 	if (!cred)
1279 		return -EINVAL;
1280 
1281 	put_user_ns(cred->user_ns);
1282 	set_cred_user_ns(cred, get_user_ns(user_ns));
1283 
1284 	return 0;
1285 }
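
/*
 * Illustrative userspace sketch (not part of this file): userns_install()
 * is reached via setns(2) on a user-namespace file descriptor, e.g.:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int join_userns(const char *path)	// e.g. "/proc/1234/ns/user"
 *	{
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (setns(fd, CLONE_NEWUSER) != 0) {
 *			perror("setns");
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 * The checks above make this fail for the caller's current user namespace,
 * for multi-threaded callers, for callers sharing their fs_struct, and for
 * namespaces over which the caller lacks CAP_SYS_ADMIN.
 */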
1286 
1287 struct ns_common *ns_get_owner(struct ns_common *ns)
1288 {
1289 	struct user_namespace *my_user_ns = current_user_ns();
1290 	struct user_namespace *owner, *p;
1291 
1292 	/* See if the owner is in the current user namespace */
1293 	owner = p = ns->ops->owner(ns);
1294 	for (;;) {
1295 		if (!p)
1296 			return ERR_PTR(-EPERM);
1297 		if (p == my_user_ns)
1298 			break;
1299 		p = p->parent;
1300 	}
1301 
1302 	return &get_user_ns(owner)->ns;
1303 }
1304 
1305 static struct user_namespace *userns_owner(struct ns_common *ns)
1306 {
1307 	return to_user_ns(ns)->parent;
1308 }
1309 
1310 const struct proc_ns_operations userns_operations = {
1311 	.name		= "user",
1312 	.type		= CLONE_NEWUSER,
1313 	.get		= userns_get,
1314 	.put		= userns_put,
1315 	.install	= userns_install,
1316 	.owner		= userns_owner,
1317 	.get_parent	= ns_get_owner,
1318 };
1319 
1320 static __init int user_namespaces_init(void)
1321 {
1322 	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
1323 	return 0;
1324 }
1325 subsys_initcall(user_namespaces_init);
1326