/*
 * linux/kernel/user.c
 *
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))

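/*
 * Worked example of the hash function (assuming the non-small case,
 * UIDHASH_BITS == 8, UIDHASH_MASK == 255): uid 1000 lands in bucket
 *
 *	((1000 >> 8) + 1000) & 255 == (3 + 1000) & 255 == 235
 *
 * Folding the bits above UIDHASH_BITS back into the low bits spreads
 * UIDs that differ only in their high bits across different buckets.
 */
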
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
static DEFINE_SPINLOCK(uidhash_lock);

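/*
 * The root user is statically allocated because the boot task needs a
 * valid current->user before uid_cachep even exists.  uid_cache_init()
 * hashes it into uidhash_table once the table has been set up.
 */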
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm     = 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;

	spin_lock(&uidhash_lock);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock(&uidhash_lock);
	return ret;
}

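/*
 * Drop a reference on a user_struct.  atomic_dec_and_lock() takes
 * uidhash_lock only when the count actually reaches zero, so dropping
 * a non-final reference stays lock-free; the final drop unhashes and
 * frees the entry under the lock, so a concurrent uid_hash_find()
 * can never see (or re-reference) a user_struct with a zero count.
 */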
void free_uid(struct user_struct *up)
{
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
}

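/*
 * Find the user_struct for the given UID, creating it if it does not
 * exist yet.  Returns with a reference held, or NULL if the slab
 * allocation or the keyring setup fails.
 */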
struct user_struct *alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced on adding
		 * the same user already.  The allocation above can sleep
		 * (SLAB_KERNEL), and the hash was unlocked meanwhile, so
		 * another task may have inserted this UID first.
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}

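/*
 * Typical caller (a sketch of the setuid() path; compare set_user()
 * in kernel/sys.c at this revision):
 *
 *	struct user_struct *new_user = alloc_uid(new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 *
 * switch_uid() consumes the reference that alloc_uid() took on
 * new_user and drops the reference current held on its old
 * user_struct.
 */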
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	free_uid(old_user);
	suid_keys(current);
}


static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
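	/*
	 * SLAB_PANIC above: if the cache cannot be created the kernel
	 * panics during boot rather than returning NULL, so there is no
	 * error check on uid_cachep here.
	 */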

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);