xref: /openbmc/linux/kernel/user_namespace.c (revision 151f4e2b)
1 /*
2  *  This program is free software; you can redistribute it and/or
3  *  modify it under the terms of the GNU General Public License as
4  *  published by the Free Software Foundation, version 2 of the
5  *  License.
6  */
7 
8 #include <linux/export.h>
9 #include <linux/nsproxy.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/user_namespace.h>
13 #include <linux/proc_ns.h>
14 #include <linux/highuid.h>
15 #include <linux/cred.h>
16 #include <linux/securebits.h>
17 #include <linux/keyctl.h>
18 #include <linux/key-type.h>
19 #include <keys/user-type.h>
20 #include <linux/seq_file.h>
21 #include <linux/fs.h>
22 #include <linux/uaccess.h>
23 #include <linux/ctype.h>
24 #include <linux/projid.h>
25 #include <linux/fs_struct.h>
26 #include <linux/bsearch.h>
27 #include <linux/sort.h>
28 
29 static struct kmem_cache *user_ns_cachep __read_mostly;
30 static DEFINE_MUTEX(userns_state_mutex);
31 
32 static bool new_idmap_permitted(const struct file *file,
33 				struct user_namespace *ns, int cap_setid,
34 				struct uid_gid_map *map);
35 static void free_user_ns(struct work_struct *work);
36 
37 static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
38 {
39 	return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
40 }
41 
42 static void dec_user_namespaces(struct ucounts *ucounts)
43 {
44 	return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
45 }
46 
47 static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
48 {
49 	/* Start with the same capabilities as init, but these capabilities are
50 	 * useless for doing anything as they are bound to the new user namespace.
51 	 */
52 	cred->securebits = SECUREBITS_DEFAULT;
53 	cred->cap_inheritable = CAP_EMPTY_SET;
54 	cred->cap_permitted = CAP_FULL_SET;
55 	cred->cap_effective = CAP_FULL_SET;
56 	cred->cap_ambient = CAP_EMPTY_SET;
57 	cred->cap_bset = CAP_FULL_SET;
58 #ifdef CONFIG_KEYS
59 	key_put(cred->request_key_auth);
60 	cred->request_key_auth = NULL;
61 #endif
62 	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
63 	cred->user_ns = user_ns;
64 }
65 
66 /*
67  * Create a new user namespace, deriving the creator from the user in the
68  * passed credentials, and replacing that user with the new root user for the
69  * new namespace.
70  *
71  * This is called by copy_creds(), which will finish setting the target task's
72  * credentials.
73  */
74 int create_user_ns(struct cred *new)
75 {
76 	struct user_namespace *ns, *parent_ns = new->user_ns;
77 	kuid_t owner = new->euid;
78 	kgid_t group = new->egid;
79 	struct ucounts *ucounts;
80 	int ret, i;
81 
82 	ret = -ENOSPC;
83 	if (parent_ns->level > 32)
84 		goto fail;
85 
86 	ucounts = inc_user_namespaces(parent_ns, owner);
87 	if (!ucounts)
88 		goto fail;
89 
90 	/*
91 	 * Verify that we cannot violate the file access policy that is
92 	 * specified by the root directory, by verifying that the root
93 	 * directory is at the root of the mount namespace, which allows
94 	 * all files to be accessed.
95 	 */
96 	ret = -EPERM;
97 	if (current_chrooted())
98 		goto fail_dec;
99 
100 	/* The creator needs a mapping in the parent user namespace
101 	 * or else we won't be able to reasonably tell userspace who
102 	 * created a user_namespace.
103 	 */
104 	ret = -EPERM;
105 	if (!kuid_has_mapping(parent_ns, owner) ||
106 	    !kgid_has_mapping(parent_ns, group))
107 		goto fail_dec;
108 
109 	ret = -ENOMEM;
110 	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
111 	if (!ns)
112 		goto fail_dec;
113 
114 	ret = ns_alloc_inum(&ns->ns);
115 	if (ret)
116 		goto fail_free;
117 	ns->ns.ops = &userns_operations;
118 
119 	atomic_set(&ns->count, 1);
120 	/* Leave the new->user_ns reference with the new user namespace. */
121 	ns->parent = parent_ns;
122 	ns->level = parent_ns->level + 1;
123 	ns->owner = owner;
124 	ns->group = group;
125 	INIT_WORK(&ns->work, free_user_ns);
126 	for (i = 0; i < UCOUNT_COUNTS; i++) {
127 		ns->ucount_max[i] = INT_MAX;
128 	}
129 	ns->ucounts = ucounts;
130 
131 	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
132 	mutex_lock(&userns_state_mutex);
133 	ns->flags = parent_ns->flags;
134 	mutex_unlock(&userns_state_mutex);
135 
136 #ifdef CONFIG_PERSISTENT_KEYRINGS
137 	init_rwsem(&ns->persistent_keyring_register_sem);
138 #endif
139 	ret = -ENOMEM;
140 	if (!setup_userns_sysctls(ns))
141 		goto fail_keyring;
142 
143 	set_cred_user_ns(new, ns);
144 	return 0;
145 fail_keyring:
146 #ifdef CONFIG_PERSISTENT_KEYRINGS
147 	key_put(ns->persistent_keyring_register);
148 #endif
149 	ns_free_inum(&ns->ns);
150 fail_free:
151 	kmem_cache_free(user_ns_cachep, ns);
152 fail_dec:
153 	dec_user_namespaces(ucounts);
154 fail:
155 	return ret;
156 }
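
/*
 * For illustration only (not part of this file): the usual way a task
 * reaches create_user_ns() is unshare(2) or clone(2) with CLONE_NEWUSER,
 * via unshare_userns() below or copy_creds() respectively, e.g.:
 *
 *	if (unshare(CLONE_NEWUSER) == 0) {
 *		// the task now owns a new, as yet unmapped, user namespace;
 *		// uid_map/gid_map still need to be written
 *	}
 */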
157 
158 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
159 {
160 	struct cred *cred;
161 	int err = -ENOMEM;
162 
163 	if (!(unshare_flags & CLONE_NEWUSER))
164 		return 0;
165 
166 	cred = prepare_creds();
167 	if (cred) {
168 		err = create_user_ns(cred);
169 		if (err)
170 			put_cred(cred);
171 		else
172 			*new_cred = cred;
173 	}
174 
175 	return err;
176 }
177 
178 static void free_user_ns(struct work_struct *work)
179 {
180 	struct user_namespace *parent, *ns =
181 		container_of(work, struct user_namespace, work);
182 
183 	do {
184 		struct ucounts *ucounts = ns->ucounts;
185 		parent = ns->parent;
186 		if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
187 			kfree(ns->gid_map.forward);
188 			kfree(ns->gid_map.reverse);
189 		}
190 		if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
191 			kfree(ns->uid_map.forward);
192 			kfree(ns->uid_map.reverse);
193 		}
194 		if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
195 			kfree(ns->projid_map.forward);
196 			kfree(ns->projid_map.reverse);
197 		}
198 		retire_userns_sysctls(ns);
199 #ifdef CONFIG_PERSISTENT_KEYRINGS
200 		key_put(ns->persistent_keyring_register);
201 #endif
202 		ns_free_inum(&ns->ns);
203 		kmem_cache_free(user_ns_cachep, ns);
204 		dec_user_namespaces(ucounts);
205 		ns = parent;
206 	} while (atomic_dec_and_test(&parent->count));
207 }
208 
209 void __put_user_ns(struct user_namespace *ns)
210 {
211 	schedule_work(&ns->work);
212 }
213 EXPORT_SYMBOL(__put_user_ns);
214 
215 /**
216  * idmap_key struct holds the information necessary to find an idmapping in a
217  * sorted idmap array. It is passed to cmp_map_id() as the first argument.
218  */
219 struct idmap_key {
220 	bool map_up; /* true  -> id from kid; false -> kid from id */
221 	u32 id; /* id to find */
222 	u32 count; /* == 0 unless used with map_id_range_down() */
223 };
224 
225 /**
226  * cmp_map_id - Function to be passed to bsearch() to find the requested
227  * idmapping. Expects struct idmap_key to be passed via @k.
228  */
229 static int cmp_map_id(const void *k, const void *e)
230 {
231 	u32 first, last, id2;
232 	const struct idmap_key *key = k;
233 	const struct uid_gid_extent *el = e;
234 
235 	id2 = key->id + key->count - 1;
236 
237 	/* handle map_id_{down,up}() */
238 	if (key->map_up)
239 		first = el->lower_first;
240 	else
241 		first = el->first;
242 
243 	last = first + el->count - 1;
244 
245 	if (key->id >= first && key->id <= last &&
246 	    (id2 >= first && id2 <= last))
247 		return 0;
248 
249 	if (key->id < first || id2 < first)
250 		return -1;
251 
252 	return 1;
253 }
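
/*
 * Example (values purely illustrative): with an extent
 * { first = 1000, lower_first = 100000, count = 2000 }, a lookup for
 * key->id = 1005, key->count = 1 returns 0 because both 1005 and
 * 1005 + 1 - 1 fall inside [1000, 2999]; a lookup for id 500 returns -1
 * and one for id 4000 returns 1, steering bsearch() toward the lower or
 * upper half of the array.
 */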
254 
255 /**
256  * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
257  * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
258  */
259 static struct uid_gid_extent *
260 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
261 {
262 	struct idmap_key key;
263 
264 	key.map_up = false;
265 	key.count = count;
266 	key.id = id;
267 
268 	return bsearch(&key, map->forward, extents,
269 		       sizeof(struct uid_gid_extent), cmp_map_id);
270 }
271 
272 /**
273  * map_id_range_down_base - Find idmap via linear search in static extent array.
274  * Can only be called if the number of mappings is less than or equal to
275  * UID_GID_MAP_MAX_BASE_EXTENTS.
276  */
277 static struct uid_gid_extent *
278 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
279 {
280 	unsigned idx;
281 	u32 first, last, id2;
282 
283 	id2 = id + count - 1;
284 
285 	/* Find the matching extent */
286 	for (idx = 0; idx < extents; idx++) {
287 		first = map->extent[idx].first;
288 		last = first + map->extent[idx].count - 1;
289 		if (id >= first && id <= last &&
290 		    (id2 >= first && id2 <= last))
291 			return &map->extent[idx];
292 	}
293 	return NULL;
294 }
295 
296 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
297 {
298 	struct uid_gid_extent *extent;
299 	unsigned extents = map->nr_extents;
300 	smp_rmb();
301 
302 	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
303 		extent = map_id_range_down_base(extents, map, id, count);
304 	else
305 		extent = map_id_range_down_max(extents, map, id, count);
306 
307 	/* Map the id or note failure */
308 	if (extent)
309 		id = (id - extent->first) + extent->lower_first;
310 	else
311 		id = (u32) -1;
312 
313 	return id;
314 }
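
/*
 * Example (values purely illustrative): with a single extent
 * { first = 0, lower_first = 100000, count = 65536 }, map_id_down(map, 1000)
 * yields (1000 - 0) + 100000 = 101000, while an id outside every extent,
 * such as 70000, yields (u32) -1.
 */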
315 
316 static u32 map_id_down(struct uid_gid_map *map, u32 id)
317 {
318 	return map_id_range_down(map, id, 1);
319 }
320 
321 /**
322  * map_id_up_base - Find idmap via linear search in static extent array.
323  * Can only be called if the number of mappings is less than or equal to
324  * UID_GID_MAP_MAX_BASE_EXTENTS.
325  */
326 static struct uid_gid_extent *
327 map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
328 {
329 	unsigned idx;
330 	u32 first, last;
331 
332 	/* Find the matching extent */
333 	for (idx = 0; idx < extents; idx++) {
334 		first = map->extent[idx].lower_first;
335 		last = first + map->extent[idx].count - 1;
336 		if (id >= first && id <= last)
337 			return &map->extent[idx];
338 	}
339 	return NULL;
340 }
341 
342 /**
343  * map_id_up_max - Find idmap via binary search in ordered idmap array.
344  * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
345  */
346 static struct uid_gid_extent *
347 map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
348 {
349 	struct idmap_key key;
350 
351 	key.map_up = true;
352 	key.count = 1;
353 	key.id = id;
354 
355 	return bsearch(&key, map->reverse, extents,
356 		       sizeof(struct uid_gid_extent), cmp_map_id);
357 }
358 
359 static u32 map_id_up(struct uid_gid_map *map, u32 id)
360 {
361 	struct uid_gid_extent *extent;
362 	unsigned extents = map->nr_extents;
363 	smp_rmb();
364 
365 	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
366 		extent = map_id_up_base(extents, map, id);
367 	else
368 		extent = map_id_up_max(extents, map, id);
369 
370 	/* Map the id or note failure */
371 	if (extent)
372 		id = (id - extent->lower_first) + extent->first;
373 	else
374 		id = (u32) -1;
375 
376 	return id;
377 }
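
/*
 * Example (same illustrative extent as above): map_id_up(map, 101000)
 * yields (101000 - 100000) + 0 = 1000, i.e. the inverse of map_id_down().
 */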
378 
379 /**
380  *	make_kuid - Map a user-namespace uid pair into a kuid.
381  *	@ns:  User namespace that the uid is in
382  *	@uid: User identifier
383  *
384  *	Maps a user-namespace uid pair into a kernel internal kuid,
385  *	and returns that kuid.
386  *
387  *	When there is no mapping defined for the user-namespace uid
388  *	pair INVALID_UID is returned.  Callers are expected to test
389  *	for and handle INVALID_UID being returned.  INVALID_UID
390  *	may be tested for using uid_valid().
391  */
392 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
393 {
394 	/* Map the uid to a global kernel uid */
395 	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
396 }
397 EXPORT_SYMBOL(make_kuid);
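
/*
 * A typical caller pattern (sketch, not from this file): a uid_t received
 * from userspace is converted before use and rejected if unmapped:
 *
 *	kuid_t kuid = make_kuid(current_user_ns(), uid);
 *	if (!uid_valid(kuid))
 *		return -EINVAL;
 */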
398 
399 /**
400  *	from_kuid - Create a uid from a kuid user-namespace pair.
401  *	@targ: The user namespace we want a uid in.
402  *	@kuid: The kernel internal uid to start with.
403  *
404  *	Map @kuid into the user-namespace specified by @targ and
405  *	return the resulting uid.
406  *
407  *	There is always a mapping into the initial user_namespace.
408  *
409  *	If @kuid has no mapping in @targ (uid_t)-1 is returned.
410  */
411 uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
412 {
413 	/* Map the uid from a global kernel uid */
414 	return map_id_up(&targ->uid_map, __kuid_val(kuid));
415 }
416 EXPORT_SYMBOL(from_kuid);
417 
418 /**
419  *	from_kuid_munged - Create a uid from a kuid user-namespace pair.
420  *	@targ: The user namespace we want a uid in.
421  *	@kuid: The kernel internal uid to start with.
422  *
423  *	Map @kuid into the user-namespace specified by @targ and
424  *	return the resulting uid.
425  *
426  *	There is always a mapping into the initial user_namespace.
427  *
428  *	Unlike from_kuid, from_kuid_munged never fails and always
429  *	returns a valid uid.  This makes from_kuid_munged appropriate
430  *	for use in syscalls like stat and getuid where failing the
431  *	system call and failing to provide a valid uid are not
432  *	options.
433  *
434  *	If @kuid has no mapping in @targ overflowuid is returned.
435  */
436 uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
437 {
438 	uid_t uid;
439 	uid = from_kuid(targ, kuid);
440 
441 	if (uid == (uid_t) -1)
442 		uid = overflowuid;
443 	return uid;
444 }
445 EXPORT_SYMBOL(from_kuid_munged);
446 
447 /**
448  *	make_kgid - Map a user-namespace gid pair into a kgid.
449  *	@ns:  User namespace that the gid is in
450  *	@gid: group identifier
451  *
452  *	Maps a user-namespace gid pair into a kernel internal kgid,
453  *	and returns that kgid.
454  *
455  *	When there is no mapping defined for the user-namespace gid
456  *	pair INVALID_GID is returned.  Callers are expected to test
457  *	for and handle INVALID_GID being returned.  INVALID_GID may be
458  *	tested for using gid_valid().
459  */
460 kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
461 {
462 	/* Map the gid to a global kernel gid */
463 	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
464 }
465 EXPORT_SYMBOL(make_kgid);
466 
467 /**
468  *	from_kgid - Create a gid from a kgid user-namespace pair.
469  *	@targ: The user namespace we want a gid in.
470  *	@kgid: The kernel internal gid to start with.
471  *
472  *	Map @kgid into the user-namespace specified by @targ and
473  *	return the resulting gid.
474  *
475  *	There is always a mapping into the initial user_namespace.
476  *
477  *	If @kgid has no mapping in @targ (gid_t)-1 is returned.
478  */
479 gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
480 {
481 	/* Map the gid from a global kernel gid */
482 	return map_id_up(&targ->gid_map, __kgid_val(kgid));
483 }
484 EXPORT_SYMBOL(from_kgid);
485 
486 /**
487  *	from_kgid_munged - Create a gid from a kgid user-namespace pair.
488  *	@targ: The user namespace we want a gid in.
489  *	@kgid: The kernel internal gid to start with.
490  *
491  *	Map @kgid into the user-namespace specified by @targ and
492  *	return the resulting gid.
493  *
494  *	There is always a mapping into the initial user_namespace.
495  *
496  *	Unlike from_kgid, from_kgid_munged never fails and always
497  *	returns a valid gid.  This makes from_kgid_munged appropriate
498  *	for use in syscalls like stat and getgid where failing the
499  *	system call and failing to provide a valid gid are not options.
500  *
501  *	If @kgid has no mapping in @targ overflowgid is returned.
502  */
503 gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
504 {
505 	gid_t gid;
506 	gid = from_kgid(targ, kgid);
507 
508 	if (gid == (gid_t) -1)
509 		gid = overflowgid;
510 	return gid;
511 }
512 EXPORT_SYMBOL(from_kgid_munged);
513 
514 /**
515  *	make_kprojid - Map a user-namespace projid pair into a kprojid.
516  *	@ns:  User namespace that the projid is in
517  *	@projid: Project identifier
518  *
519  *	Maps a user-namespace projid pair into a kernel internal kprojid,
520  *	and returns that kprojid.
521  *
522  *	When there is no mapping defined for the user-namespace projid
523  *	pair INVALID_PROJID is returned.  Callers are expected to test
524  *	for and handle INVALID_PROJID being returned.  INVALID_PROJID
525  *	may be tested for using projid_valid().
526  */
527 kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
528 {
529 	/* Map the projid to a global kernel projid */
530 	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
531 }
532 EXPORT_SYMBOL(make_kprojid);
533 
534 /**
535  *	from_kprojid - Create a projid from a kprojid user-namespace pair.
536  *	@targ: The user namespace we want a projid in.
537  *	@kprojid: The kernel internal project identifier to start with.
538  *
539  *	Map @kprojid into the user-namespace specified by @targ and
540  *	return the resulting projid.
541  *
542  *	There is always a mapping into the initial user_namespace.
543  *
544  *	If @kprojid has no mapping in @targ (projid_t)-1 is returned.
545  */
546 projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
547 {
548 	/* Map the projid from a global kernel projid */
549 	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
550 }
551 EXPORT_SYMBOL(from_kprojid);
552 
553 /**
554  *	from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
555  *	@targ: The user namespace we want a projid in.
556  *	@kprojid: The kernel internal projid to start with.
557  *
558  *	Map @kprojid into the user-namespace specified by @targ and
559  *	return the resulting projid.
560  *
561  *	There is always a mapping into the initial user_namespace.
562  *
563  *	Unlike from_kprojid, from_kprojid_munged never fails and always
564  *	returns a valid projid.  This makes from_kprojid_munged
565  *	appropriate for use in syscalls like stat, where failing the
566  *	system call and failing to provide a valid projid are not
567  *	options.
568  *
569  *	If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
570  */
571 projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
572 {
573 	projid_t projid;
574 	projid = from_kprojid(targ, kprojid);
575 
576 	if (projid == (projid_t) -1)
577 		projid = OVERFLOW_PROJID;
578 	return projid;
579 }
580 EXPORT_SYMBOL(from_kprojid_munged);
581 
582 
583 static int uid_m_show(struct seq_file *seq, void *v)
584 {
585 	struct user_namespace *ns = seq->private;
586 	struct uid_gid_extent *extent = v;
587 	struct user_namespace *lower_ns;
588 	uid_t lower;
589 
590 	lower_ns = seq_user_ns(seq);
591 	if ((lower_ns == ns) && lower_ns->parent)
592 		lower_ns = lower_ns->parent;
593 
594 	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
595 
596 	seq_printf(seq, "%10u %10u %10u\n",
597 		extent->first,
598 		lower,
599 		extent->count);
600 
601 	return 0;
602 }
603 
604 static int gid_m_show(struct seq_file *seq, void *v)
605 {
606 	struct user_namespace *ns = seq->private;
607 	struct uid_gid_extent *extent = v;
608 	struct user_namespace *lower_ns;
609 	gid_t lower;
610 
611 	lower_ns = seq_user_ns(seq);
612 	if ((lower_ns == ns) && lower_ns->parent)
613 		lower_ns = lower_ns->parent;
614 
615 	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
616 
617 	seq_printf(seq, "%10u %10u %10u\n",
618 		extent->first,
619 		lower,
620 		extent->count);
621 
622 	return 0;
623 }
624 
625 static int projid_m_show(struct seq_file *seq, void *v)
626 {
627 	struct user_namespace *ns = seq->private;
628 	struct uid_gid_extent *extent = v;
629 	struct user_namespace *lower_ns;
630 	projid_t lower;
631 
632 	lower_ns = seq_user_ns(seq);
633 	if ((lower_ns == ns) && lower_ns->parent)
634 		lower_ns = lower_ns->parent;
635 
636 	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
637 
638 	seq_printf(seq, "%10u %10u %10u\n",
639 		extent->first,
640 		lower,
641 		extent->count);
642 
643 	return 0;
644 }
645 
646 static void *m_start(struct seq_file *seq, loff_t *ppos,
647 		     struct uid_gid_map *map)
648 {
649 	loff_t pos = *ppos;
650 	unsigned extents = map->nr_extents;
651 	smp_rmb();
652 
653 	if (pos >= extents)
654 		return NULL;
655 
656 	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
657 		return &map->extent[pos];
658 
659 	return &map->forward[pos];
660 }
661 
662 static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
663 {
664 	struct user_namespace *ns = seq->private;
665 
666 	return m_start(seq, ppos, &ns->uid_map);
667 }
668 
669 static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
670 {
671 	struct user_namespace *ns = seq->private;
672 
673 	return m_start(seq, ppos, &ns->gid_map);
674 }
675 
676 static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
677 {
678 	struct user_namespace *ns = seq->private;
679 
680 	return m_start(seq, ppos, &ns->projid_map);
681 }
682 
683 static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
684 {
685 	(*pos)++;
686 	return seq->op->start(seq, pos);
687 }
688 
689 static void m_stop(struct seq_file *seq, void *v)
690 {
691 	return;
692 }
693 
694 const struct seq_operations proc_uid_seq_operations = {
695 	.start = uid_m_start,
696 	.stop = m_stop,
697 	.next = m_next,
698 	.show = uid_m_show,
699 };
700 
701 const struct seq_operations proc_gid_seq_operations = {
702 	.start = gid_m_start,
703 	.stop = m_stop,
704 	.next = m_next,
705 	.show = gid_m_show,
706 };
707 
708 const struct seq_operations proc_projid_seq_operations = {
709 	.start = projid_m_start,
710 	.stop = m_stop,
711 	.next = m_next,
712 	.show = projid_m_show,
713 };
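
/*
 * These seq_operations back /proc/<pid>/uid_map, gid_map and projid_map.
 * Each extent is printed as one "first lower count" line; for example a
 * map holding the single illustrative extent { first = 0,
 * lower_first = 100000, count = 65536 } reads back roughly as:
 *
 *	         0     100000      65536
 *
 * where the middle column is shown as seen from the reading (usually the
 * parent) user namespace, see uid_m_show() and friends above.
 */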
714 
715 static bool mappings_overlap(struct uid_gid_map *new_map,
716 			     struct uid_gid_extent *extent)
717 {
718 	u32 upper_first, lower_first, upper_last, lower_last;
719 	unsigned idx;
720 
721 	upper_first = extent->first;
722 	lower_first = extent->lower_first;
723 	upper_last = upper_first + extent->count - 1;
724 	lower_last = lower_first + extent->count - 1;
725 
726 	for (idx = 0; idx < new_map->nr_extents; idx++) {
727 		u32 prev_upper_first, prev_lower_first;
728 		u32 prev_upper_last, prev_lower_last;
729 		struct uid_gid_extent *prev;
730 
731 		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
732 			prev = &new_map->extent[idx];
733 		else
734 			prev = &new_map->forward[idx];
735 
736 		prev_upper_first = prev->first;
737 		prev_lower_first = prev->lower_first;
738 		prev_upper_last = prev_upper_first + prev->count - 1;
739 		prev_lower_last = prev_lower_first + prev->count - 1;
740 
741 		/* Does the upper range intersect a previous extent? */
742 		if ((prev_upper_first <= upper_last) &&
743 		    (prev_upper_last >= upper_first))
744 			return true;
745 
746 		/* Does the lower range intersect a previous extent? */
747 		if ((prev_lower_first <= lower_last) &&
748 		    (prev_lower_last >= lower_first))
749 			return true;
750 	}
751 	return false;
752 }
753 
754 /**
755  * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
756  * Takes care to allocate a 4K block of memory if the number of mappings exceeds
757  * UID_GID_MAP_MAX_BASE_EXTENTS.
758  */
759 static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
760 {
761 	struct uid_gid_extent *dest;
762 
763 	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
764 		struct uid_gid_extent *forward;
765 
766 		/* Allocate memory for 340 mappings. */
767 		forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
768 					sizeof(struct uid_gid_extent),
769 					GFP_KERNEL);
770 		if (!forward)
771 			return -ENOMEM;
772 
773 		/* Copy over memory. Only set up memory for the forward pointer.
774 		 * Defer the memory setup for the reverse pointer.
775 		 */
776 		memcpy(forward, map->extent,
777 		       map->nr_extents * sizeof(map->extent[0]));
778 
779 		map->forward = forward;
780 		map->reverse = NULL;
781 	}
782 
783 	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
784 		dest = &map->extent[map->nr_extents];
785 	else
786 		dest = &map->forward[map->nr_extents];
787 
788 	*dest = *extent;
789 	map->nr_extents++;
790 	return 0;
791 }
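
/*
 * Storage sketch: the first UID_GID_MAP_MAX_BASE_EXTENTS extents live in
 * the fixed map->extent[] array inside struct uid_gid_map; the write that
 * would exceed that limit moves everything into the kmalloc'ed
 * map->forward array, sized for up to UID_GID_MAP_MAX_EXTENTS (340)
 * entries, and sort_idmaps() later builds map->reverse from it.
 */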
792 
793 /* cmp function to sort() forward mappings */
794 static int cmp_extents_forward(const void *a, const void *b)
795 {
796 	const struct uid_gid_extent *e1 = a;
797 	const struct uid_gid_extent *e2 = b;
798 
799 	if (e1->first < e2->first)
800 		return -1;
801 
802 	if (e1->first > e2->first)
803 		return 1;
804 
805 	return 0;
806 }
807 
808 /* cmp function to sort() reverse mappings */
809 static int cmp_extents_reverse(const void *a, const void *b)
810 {
811 	const struct uid_gid_extent *e1 = a;
812 	const struct uid_gid_extent *e2 = b;
813 
814 	if (e1->lower_first < e2->lower_first)
815 		return -1;
816 
817 	if (e1->lower_first > e2->lower_first)
818 		return 1;
819 
820 	return 0;
821 }
822 
823 /**
824  * sort_idmaps - Sorts an array of idmap entries.
825  * A no-op unless the number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
826  */
827 static int sort_idmaps(struct uid_gid_map *map)
828 {
829 	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
830 		return 0;
831 
832 	/* Sort forward array. */
833 	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
834 	     cmp_extents_forward, NULL);
835 
836 	/* Only copy the memory from forward we actually need. */
837 	map->reverse = kmemdup(map->forward,
838 			       map->nr_extents * sizeof(struct uid_gid_extent),
839 			       GFP_KERNEL);
840 	if (!map->reverse)
841 		return -ENOMEM;
842 
843 	/* Sort reverse array. */
844 	sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
845 	     cmp_extents_reverse, NULL);
846 
847 	return 0;
848 }
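
/*
 * After this, map->forward is ordered by ->first (used by
 * map_id_range_down_max()) and map->reverse is ordered by ->lower_first
 * (used by map_id_up_max()), so both directions can use bsearch() with
 * cmp_map_id().
 */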
849 
850 static ssize_t map_write(struct file *file, const char __user *buf,
851 			 size_t count, loff_t *ppos,
852 			 int cap_setid,
853 			 struct uid_gid_map *map,
854 			 struct uid_gid_map *parent_map)
855 {
856 	struct seq_file *seq = file->private_data;
857 	struct user_namespace *ns = seq->private;
858 	struct uid_gid_map new_map;
859 	unsigned idx;
860 	struct uid_gid_extent extent;
861 	char *kbuf = NULL, *pos, *next_line;
862 	ssize_t ret;
863 
864 	/* Only allow < page size writes at the beginning of the file */
865 	if ((*ppos != 0) || (count >= PAGE_SIZE))
866 		return -EINVAL;
867 
868 	/* Slurp in the user data */
869 	kbuf = memdup_user_nul(buf, count);
870 	if (IS_ERR(kbuf))
871 		return PTR_ERR(kbuf);
872 
873 	/*
874 	 * The userns_state_mutex serializes all writes to any given map.
875 	 *
876 	 * Any map is only ever written once.
877 	 *
878 	 * An id map fits within 1 cache line on most architectures.
879 	 *
880 	 * On read nothing needs to be done unless you are on an
881 	 * architecture with a crazy cache coherency model like alpha.
882 	 *
883 	 * There is a one time data dependency between reading the
884 	 * count of the extents and the values of the extents.  The
885 	 * desired behavior is to see the values of the extents that
886 	 * were written before the count of the extents.
887 	 *
888 	 * To achieve this, smp_wmb() is used to guarantee the write
889 	 * order and smp_rmb() is used to guarantee that we don't have
890 	 * crazy architectures returning stale data.
891 	 */
892 	mutex_lock(&userns_state_mutex);
893 
894 	memset(&new_map, 0, sizeof(struct uid_gid_map));
895 
896 	ret = -EPERM;
897 	/* Only allow one successful write to the map */
898 	if (map->nr_extents != 0)
899 		goto out;
900 
901 	/*
902 	 * Adjusting namespace settings requires capabilities on the target.
903 	 */
904 	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
905 		goto out;
906 
907 	/* Parse the user data */
908 	ret = -EINVAL;
909 	pos = kbuf;
910 	for (; pos; pos = next_line) {
911 
912 		/* Find the end of line and ensure I don't look past it */
913 		next_line = strchr(pos, '\n');
914 		if (next_line) {
915 			*next_line = '\0';
916 			next_line++;
917 			if (*next_line == '\0')
918 				next_line = NULL;
919 		}
920 
921 		pos = skip_spaces(pos);
922 		extent.first = simple_strtoul(pos, &pos, 10);
923 		if (!isspace(*pos))
924 			goto out;
925 
926 		pos = skip_spaces(pos);
927 		extent.lower_first = simple_strtoul(pos, &pos, 10);
928 		if (!isspace(*pos))
929 			goto out;
930 
931 		pos = skip_spaces(pos);
932 		extent.count = simple_strtoul(pos, &pos, 10);
933 		if (*pos && !isspace(*pos))
934 			goto out;
935 
936 		/* Verify there is no trailing junk on the line */
937 		pos = skip_spaces(pos);
938 		if (*pos != '\0')
939 			goto out;
940 
941 		/* Verify we have been given valid starting values */
942 		if ((extent.first == (u32) -1) ||
943 		    (extent.lower_first == (u32) -1))
944 			goto out;
945 
946 		/* Verify count is not zero and does not cause the
947 		 * extent to wrap
948 		 */
949 		if ((extent.first + extent.count) <= extent.first)
950 			goto out;
951 		if ((extent.lower_first + extent.count) <=
952 		     extent.lower_first)
953 			goto out;
954 
955 		/* Do the ranges in extent overlap any previous extents? */
956 		if (mappings_overlap(&new_map, &extent))
957 			goto out;
958 
959 		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
960 		    (next_line != NULL))
961 			goto out;
962 
963 		ret = insert_extent(&new_map, &extent);
964 		if (ret < 0)
965 			goto out;
966 		ret = -EINVAL;
967 	}
968 	/* Be very certain the new map actually exists */
969 	if (new_map.nr_extents == 0)
970 		goto out;
971 
972 	ret = -EPERM;
973 	/* Validate the user is allowed to use the user ids being mapped to. */
974 	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
975 		goto out;
976 
977 	ret = -EPERM;
978 	/* Map the lower ids from the parent user namespace to the
979 	 * kernel global id space.
980 	 */
981 	for (idx = 0; idx < new_map.nr_extents; idx++) {
982 		struct uid_gid_extent *e;
983 		u32 lower_first;
984 
985 		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
986 			e = &new_map.extent[idx];
987 		else
988 			e = &new_map.forward[idx];
989 
990 		lower_first = map_id_range_down(parent_map,
991 						e->lower_first,
992 						e->count);
993 
994 		/* Fail if we cannot map the specified extent to
995 		 * the kernel global id space.
996 		 */
997 		if (lower_first == (u32) -1)
998 			goto out;
999 
1000 		e->lower_first = lower_first;
1001 	}
1002 
1003 	/*
1004 	 * If we want to use binary search for lookup, this clones the extent
1005 	 * array and sorts both copies.
1006 	 */
1007 	ret = sort_idmaps(&new_map);
1008 	if (ret < 0)
1009 		goto out;
1010 
1011 	/* Install the map */
1012 	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
1013 		memcpy(map->extent, new_map.extent,
1014 		       new_map.nr_extents * sizeof(new_map.extent[0]));
1015 	} else {
1016 		map->forward = new_map.forward;
1017 		map->reverse = new_map.reverse;
1018 	}
1019 	smp_wmb();
1020 	map->nr_extents = new_map.nr_extents;
1021 
1022 	*ppos = count;
1023 	ret = count;
1024 out:
1025 	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
1026 		kfree(new_map.forward);
1027 		kfree(new_map.reverse);
1028 		map->forward = NULL;
1029 		map->reverse = NULL;
1030 		map->nr_extents = 0;
1031 	}
1032 
1033 	mutex_unlock(&userns_state_mutex);
1034 	kfree(kbuf);
1035 	return ret;
1036 }
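
/*
 * Example (illustrative): writing the single line "0 100000 65536" to
 * /proc/<pid>/uid_map creates one extent { first = 0, lower_first = 100000,
 * count = 65536 }, with lower_first then translated through the parent's
 * map into the kernel id space.  Several "first lower_first count" lines
 * may be written in one write(2), but each map accepts only one
 * successful write.
 */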
1037 
1038 ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
1039 			   size_t size, loff_t *ppos)
1040 {
1041 	struct seq_file *seq = file->private_data;
1042 	struct user_namespace *ns = seq->private;
1043 	struct user_namespace *seq_ns = seq_user_ns(seq);
1044 
1045 	if (!ns->parent)
1046 		return -EPERM;
1047 
1048 	if ((seq_ns != ns) && (seq_ns != ns->parent))
1049 		return -EPERM;
1050 
1051 	return map_write(file, buf, size, ppos, CAP_SETUID,
1052 			 &ns->uid_map, &ns->parent->uid_map);
1053 }
1054 
1055 ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
1056 			   size_t size, loff_t *ppos)
1057 {
1058 	struct seq_file *seq = file->private_data;
1059 	struct user_namespace *ns = seq->private;
1060 	struct user_namespace *seq_ns = seq_user_ns(seq);
1061 
1062 	if (!ns->parent)
1063 		return -EPERM;
1064 
1065 	if ((seq_ns != ns) && (seq_ns != ns->parent))
1066 		return -EPERM;
1067 
1068 	return map_write(file, buf, size, ppos, CAP_SETGID,
1069 			 &ns->gid_map, &ns->parent->gid_map);
1070 }
1071 
1072 ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
1073 			      size_t size, loff_t *ppos)
1074 {
1075 	struct seq_file *seq = file->private_data;
1076 	struct user_namespace *ns = seq->private;
1077 	struct user_namespace *seq_ns = seq_user_ns(seq);
1078 
1079 	if (!ns->parent)
1080 		return -EPERM;
1081 
1082 	if ((seq_ns != ns) && (seq_ns != ns->parent))
1083 		return -EPERM;
1084 
1085 	/* Anyone can set any valid project id; no capability needed */
1086 	return map_write(file, buf, size, ppos, -1,
1087 			 &ns->projid_map, &ns->parent->projid_map);
1088 }
1089 
1090 static bool new_idmap_permitted(const struct file *file,
1091 				struct user_namespace *ns, int cap_setid,
1092 				struct uid_gid_map *new_map)
1093 {
1094 	const struct cred *cred = file->f_cred;
1095 	/* Don't allow mappings that would allow anything that wouldn't
1096 	 * be allowed without the establishment of unprivileged mappings.
1097 	 */
1098 	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
1099 	    uid_eq(ns->owner, cred->euid)) {
1100 		u32 id = new_map->extent[0].lower_first;
1101 		if (cap_setid == CAP_SETUID) {
1102 			kuid_t uid = make_kuid(ns->parent, id);
1103 			if (uid_eq(uid, cred->euid))
1104 				return true;
1105 		} else if (cap_setid == CAP_SETGID) {
1106 			kgid_t gid = make_kgid(ns->parent, id);
1107 			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
1108 			    gid_eq(gid, cred->egid))
1109 				return true;
1110 		}
1111 	}
1112 
1113 	/* Allow anyone to set a mapping that doesn't require privilege */
1114 	if (!cap_valid(cap_setid))
1115 		return true;
1116 
1117 	/* Allow the specified ids if we have the appropriate capability
1118 	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
1119 	 * And the opener of the id file also had the appropriate capability.
1120 	 */
1121 	if (ns_capable(ns->parent, cap_setid) &&
1122 	    file_ns_capable(file, ns->parent, cap_setid))
1123 		return true;
1124 
1125 	return false;
1126 }
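
/*
 * Example (illustrative): a process without CAP_SETUID that created the
 * namespace may still write the single line "1000 1000 1" to uid_map when
 * uid 1000 in the parent namespace is its own effective uid, since that
 * maps only its own id; mapping anything else requires CAP_SETUID in the
 * parent namespace.
 */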
1127 
1128 int proc_setgroups_show(struct seq_file *seq, void *v)
1129 {
1130 	struct user_namespace *ns = seq->private;
1131 	unsigned long userns_flags = READ_ONCE(ns->flags);
1132 
1133 	seq_printf(seq, "%s\n",
1134 		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
1135 		   "allow" : "deny");
1136 	return 0;
1137 }
1138 
1139 ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
1140 			     size_t count, loff_t *ppos)
1141 {
1142 	struct seq_file *seq = file->private_data;
1143 	struct user_namespace *ns = seq->private;
1144 	char kbuf[8], *pos;
1145 	bool setgroups_allowed;
1146 	ssize_t ret;
1147 
1148 	/* Only allow a very narrow range of strings to be written */
1149 	ret = -EINVAL;
1150 	if ((*ppos != 0) || (count >= sizeof(kbuf)))
1151 		goto out;
1152 
1153 	/* What was written? */
1154 	ret = -EFAULT;
1155 	if (copy_from_user(kbuf, buf, count))
1156 		goto out;
1157 	kbuf[count] = '\0';
1158 	pos = kbuf;
1159 
1160 	/* What is being requested? */
1161 	ret = -EINVAL;
1162 	if (strncmp(pos, "allow", 5) == 0) {
1163 		pos += 5;
1164 		setgroups_allowed = true;
1165 	}
1166 	else if (strncmp(pos, "deny", 4) == 0) {
1167 		pos += 4;
1168 		setgroups_allowed = false;
1169 	}
1170 	else
1171 		goto out;
1172 
1173 	/* Verify there is no trailing junk on the line */
1174 	pos = skip_spaces(pos);
1175 	if (*pos != '\0')
1176 		goto out;
1177 
1178 	ret = -EPERM;
1179 	mutex_lock(&userns_state_mutex);
1180 	if (setgroups_allowed) {
1181 		/* Enabling setgroups after setgroups has been disabled
1182 		 * is not allowed.
1183 		 */
1184 		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
1185 			goto out_unlock;
1186 	} else {
1187 		/* Permanently disabling setgroups after setgroups has
1188 		 * been enabled by writing the gid_map is not allowed.
1189 		 */
1190 		if (ns->gid_map.nr_extents != 0)
1191 			goto out_unlock;
1192 		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
1193 	}
1194 	mutex_unlock(&userns_state_mutex);
1195 
1196 	/* Report a successful write */
1197 	*ppos = count;
1198 	ret = count;
1199 out:
1200 	return ret;
1201 out_unlock:
1202 	mutex_unlock(&userns_state_mutex);
1203 	goto out;
1204 }
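
/*
 * Example (illustrative): an unprivileged process typically writes "deny"
 * to /proc/<pid>/setgroups before writing gid_map; once gid_map has been
 * set, "deny" is refused here, and once "deny" has been written, "allow"
 * cannot be restored.
 */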
1205 
1206 bool userns_may_setgroups(const struct user_namespace *ns)
1207 {
1208 	bool allowed;
1209 
1210 	mutex_lock(&userns_state_mutex);
1211 	/* It is not safe to use setgroups until a gid mapping in
1212 	 * the user namespace has been established.
1213 	 */
1214 	allowed = ns->gid_map.nr_extents != 0;
1215 	/* Is setgroups allowed? */
1216 	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
1217 	mutex_unlock(&userns_state_mutex);
1218 
1219 	return allowed;
1220 }
1221 
1222 /*
1223  * Returns true if @child is the same namespace or a descendant of
1224  * @ancestor.
1225  */
1226 bool in_userns(const struct user_namespace *ancestor,
1227 	       const struct user_namespace *child)
1228 {
1229 	const struct user_namespace *ns;
1230 	for (ns = child; ns->level > ancestor->level; ns = ns->parent)
1231 		;
1232 	return (ns == ancestor);
1233 }
1234 
1235 bool current_in_userns(const struct user_namespace *target_ns)
1236 {
1237 	return in_userns(target_ns, current_user_ns());
1238 }
1239 EXPORT_SYMBOL(current_in_userns);
1240 
1241 static inline struct user_namespace *to_user_ns(struct ns_common *ns)
1242 {
1243 	return container_of(ns, struct user_namespace, ns);
1244 }
1245 
1246 static struct ns_common *userns_get(struct task_struct *task)
1247 {
1248 	struct user_namespace *user_ns;
1249 
1250 	rcu_read_lock();
1251 	user_ns = get_user_ns(__task_cred(task)->user_ns);
1252 	rcu_read_unlock();
1253 
1254 	return user_ns ? &user_ns->ns : NULL;
1255 }
1256 
1257 static void userns_put(struct ns_common *ns)
1258 {
1259 	put_user_ns(to_user_ns(ns));
1260 }
1261 
1262 static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
1263 {
1264 	struct user_namespace *user_ns = to_user_ns(ns);
1265 	struct cred *cred;
1266 
1267 	/* Don't allow gaining capabilities by reentering
1268 	 * the same user namespace.
1269 	 */
1270 	if (user_ns == current_user_ns())
1271 		return -EINVAL;
1272 
1273 	/* Tasks that share a thread group must share a user namespace */
1274 	if (!thread_group_empty(current))
1275 		return -EINVAL;
1276 
1277 	if (current->fs->users != 1)
1278 		return -EINVAL;
1279 
1280 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
1281 		return -EPERM;
1282 
1283 	cred = prepare_creds();
1284 	if (!cred)
1285 		return -ENOMEM;
1286 
1287 	put_user_ns(cred->user_ns);
1288 	set_cred_user_ns(cred, get_user_ns(user_ns));
1289 
1290 	return commit_creds(cred);
1291 }
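
/*
 * This is the setns(2) path for user namespaces, e.g.
 * setns(open("/proc/<pid>/ns/user", O_RDONLY), CLONE_NEWUSER).  The checks
 * above require the caller to be single-threaded, not to share its
 * fs_struct, and to hold CAP_SYS_ADMIN in the target namespace.
 */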
1292 
1293 struct ns_common *ns_get_owner(struct ns_common *ns)
1294 {
1295 	struct user_namespace *my_user_ns = current_user_ns();
1296 	struct user_namespace *owner, *p;
1297 
1298 	/* See if the owner is in the current user namespace */
1299 	owner = p = ns->ops->owner(ns);
1300 	for (;;) {
1301 		if (!p)
1302 			return ERR_PTR(-EPERM);
1303 		if (p == my_user_ns)
1304 			break;
1305 		p = p->parent;
1306 	}
1307 
1308 	return &get_user_ns(owner)->ns;
1309 }
1310 
1311 static struct user_namespace *userns_owner(struct ns_common *ns)
1312 {
1313 	return to_user_ns(ns)->parent;
1314 }
1315 
1316 const struct proc_ns_operations userns_operations = {
1317 	.name		= "user",
1318 	.type		= CLONE_NEWUSER,
1319 	.get		= userns_get,
1320 	.put		= userns_put,
1321 	.install	= userns_install,
1322 	.owner		= userns_owner,
1323 	.get_parent	= ns_get_owner,
1324 };
1325 
1326 static __init int user_namespaces_init(void)
1327 {
1328 	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
1329 	return 0;
1330 }
1331 subsys_initcall(user_namespaces_init);
1332