/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
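
/*
 * Worked example (assuming UIDHASH_BITS == 8, so UIDHASH_SZ == 256 and
 * UIDHASH_MASK == 255; the real values come from user_namespace.h):
 *
 *	__uidhashfn(1000) == ((1000 >> 8) + 1000) & 255
 *	                  == (3 + 1000) & 255
 *	                  == 235
 *
 * Folding the high bits back in before masking spreads uids that differ
 * only above the hash width across different buckets.
 */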

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed.  Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and
 * running local_bh_enable() with local interrupts disabled is an error:
 * we would run softirq callbacks, which can unconditionally enable
 * interrupts, and the caller of free_uid() did not expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
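
/*
 * In practice this means every uidhash_lock site in this file uses the
 * irq-safe spin_lock_irq()/spin_lock_irqsave() variants, never
 * spin_lock_bh().
 */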

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};
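
/*
 * Note that root_user is never allocated from uid_cachep: it lives here
 * statically and is hashed into init_user_ns by uid_cache_init() below,
 * with one reference and one process (init) already accounted for.
 */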

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct user_struct *user;

	list_for_each_entry(user, hashent, uidhash_list) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
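
/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		int nr = atomic_read(&u->processes);	// e.g. inspect usage
 *		...
 *		free_uid(u);	// balance the ref taken by find_user()
 *	}
 */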
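
/*
 * Drop a reference on a user_struct.  atomic_dec_and_lock() takes
 * uidhash_lock only when the count is about to hit zero, so the common
 * case stays lock-free; the last putter unhashes the structure and
 * frees it.  Interrupts are disabled around the whole sequence - see
 * the uidhash_lock comment above.
 */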
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}

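/*
 * Find the user_struct for the given UID in the given namespace,
 * creating it if necessary.  The allocation happens optimistically
 * outside the lock; the hash is then rechecked under it in case
 * another task beat us to the insertion, and the loser frees its
 * copy.  Returns NULL only on allocation failure.
 */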
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct list_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}

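/*
 * Point the current task at a new user_struct, moving one process
 * count from the old user to the new one.  The caller (typically the
 * setuid() paths, via set_user()) passes in a reference it obtained
 * from alloc_uid(); that reference is handed over to current->user,
 * and the old user's reference is dropped here.
 */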
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process calls setreuid() and this brings the
	 * new uid over its NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).  If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);