xref: /openbmc/linux/kernel/user.c (revision 9ac8d3fb)
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to
 * be able to enforce per-user limits on system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

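/*
 * Worked example (a sketch; assumes UIDHASH_BITS == 7, so UIDHASH_SZ
 * == 128 and UIDHASH_MASK == 127): for uid 1000,
 * __uidhashfn(1000) == ((1000 >> 7) + 1000) & 127 == 1007 & 127 == 111,
 * i.e. uid 1000 lands in hash bucket 111. Folding the high bits into
 * the low bits keeps runs of related uids from piling into one chain.
 */
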
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and
 * running local_bh_enable() with local interrupts disabled is an error:
 * we would run softirq callbacks, which can unconditionally enable
 * interrupts, and the caller of free_uid() would not expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
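
/*
 * A minimal sketch of the interrupt-safe reference-drop pattern this
 * comment forces on callers (this is exactly the shape of free_uid()
 * below):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
 *		free_user(up, flags);	// unlocks and restores IRQ state
 *	else
 *		local_irq_restore(flags);
 *
 * spin_lock_irqsave() is softirq-safe and, unlike spin_lock_bh(),
 * legal when the caller already has local interrupts disabled.
 */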

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			/* Take a reference on behalf of the caller. */
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

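/*
 * Note: these wrappers exist so that callers such as alloc_uid() need
 * no #ifdefs of their own; when CONFIG_USER_SCHED or CONFIG_SYSFS is
 * off, the stubs in the #else branch below compile to nothing.
 */
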
/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	/* reject unparseable input rather than using an uninitialized value */
	if (sscanf(buf, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	long rt_runtime;
	int rc;

	if (sscanf(buf, "%ld", &rt_runtime) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	if (sscanf(buf, "%lu", &rt_period) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the kobject core (for now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

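/*
 * How the pieces fit together: kobj_sysfs_ops translates generic sysfs
 * reads/writes into calls on the kobj_attribute show()/store() handlers
 * above, and each handler recovers its user_struct via container_of()
 * on the embedded kobject. The empty release() is deliberate: the
 * user_struct is freed by the refcounting in this file, not by kobject.
 */
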
/* create the /sys/kernel/uids/<uid> directory (and its attribute files) */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove the sysfs directory for a user and free up
 * the corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	/* atomic_dec_and_lock() takes uidhash_lock only if the count
	 * actually drops to zero; see free_user() for why the count was
	 * temporarily re-raised.
	 */
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

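/*
 * Why defer to a workqueue at all? free_uid() may run with interrupts
 * disabled (even from softirq context), while kobject_del() and the
 * sysfs teardown above may sleep. free_user() therefore just re-raises
 * the count and punts the real work to process context.
 */
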
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the count; remove_user_sysfs_dir() will drop it again */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

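/*
 * Typical find/put pairing (a minimal sketch; uid and the surrounding
 * error handling are hypothetical):
 *
 *	struct user_struct *u = find_user(uid);
 *
 *	if (u) {
 *		... use u, e.g. atomic_read(&u->processes) ...
 *		free_uid(u);	// drop the reference find_user() took
 *	}
 */
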
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

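/*
 * A sketch of the intended call pattern (hypothetical caller; real
 * callers such as the setreuid() path also juggle the old reference):
 *
 *	struct user_struct *new_user;
 *
 *	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 */
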
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).  If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains: the user_structs stay alive, but are no
	 * longer in the hashes. Subsequent free_uid() calls will free
	 * them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);