xref: /openbmc/linux/kernel/pid.c (revision c21b37f6)
1 /*
2  * Generic pidhash and scalable, time-bounded PID allocator
3  *
4  * (C) 2002-2003 William Irwin, IBM
5  * (C) 2004 William Irwin, Oracle
6  * (C) 2002-2004 Ingo Molnar, Red Hat
7  *
8  * pid-structures are backing objects for tasks sharing a given ID to chain
9  * against. There is very little to them aside from hashing them and
10  * parking tasks using given IDs on a list.
11  *
12  * The hash is only modified with the pidmap_lock held (with interrupts
13  * disabled), and lookups are RCU-protected, so there is no additional
14  * SMP locking needed here.
15  *
16  * We have a list of bitmap pages, whose bitmaps represent the PID space.
17  * Allocating and freeing PIDs is completely lockless. The worst-case
18  * allocation scenario, when all but one of 1 million possible PIDs are
19  * already allocated, requires scanning 32 list entries and at most PAGE_SIZE
20  * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
21  */
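/*
 * Illustrative sizing note (not part of the original source), assuming
 * 4 KiB pages: each bitmap page covers PAGE_SIZE * 8 = 32768 PIDs, so the
 * one-million-PID worst case above spans 1048576 / 32768 = 32 bitmap
 * pages, and the 4-million-PID ceiling needs at most 128 such pages
 * (512 KiB of bitmaps in total).
 */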
22 
23 #include <linux/mm.h>
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/init.h>
27 #include <linux/bootmem.h>
28 #include <linux/hash.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/init_task.h>
31 
32 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
33 static struct hlist_head *pid_hash;
34 static int pidhash_shift;
35 static struct kmem_cache *pid_cachep;
36 struct pid init_struct_pid = INIT_STRUCT_PID;
37 
38 int pid_max = PID_MAX_DEFAULT;
39 
40 #define RESERVED_PIDS		300
41 
42 int pid_max_min = RESERVED_PIDS + 1;
43 int pid_max_max = PID_MAX_LIMIT;
44 
45 #define BITS_PER_PAGE		(PAGE_SIZE*8)
46 #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
47 
48 static inline int mk_pid(struct pid_namespace *pid_ns,
49 		struct pidmap *map, int off)
50 {
51 	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
52 }
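/*
 * Worked example (illustrative, not part of the original source), assuming
 * 4 KiB pages so that BITS_PER_PAGE == 32768: PID 40000 lives in the second
 * bitmap page at bit 7232, and mk_pid() reverses that split:
 *
 *	map    = &pid_ns->pidmap[40000 / BITS_PER_PAGE];   => &pidmap[1]
 *	offset = 40000 & BITS_PER_PAGE_MASK;                => 7232
 *	mk_pid(pid_ns, map, offset)                         => 40000
 */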
53 
54 #define find_next_offset(map, off)					\
55 		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
56 
57 /*
58  * PID-map pages start out as NULL; they get allocated upon
59  * first use and are never deallocated. This way a low pid_max
60  * value does not cause lots of bitmaps to be allocated, but
61  * the scheme scales up to 4 million PIDs at runtime.
62  */
63 struct pid_namespace init_pid_ns = {
64 	.kref = {
65 		.refcount       = ATOMIC_INIT(2),
66 	},
67 	.pidmap = {
68 		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
69 	},
70 	.last_pid = 0,
71 	.child_reaper = &init_task
72 };
73 
74 /*
75  * Note: disable interrupts while the pidmap_lock is held as an
76  * interrupt might come in and do read_lock(&tasklist_lock).
77  *
78  * If we don't disable interrupts there is a nasty deadlock between
79  * detach_pid()->free_pid() and another cpu that does
80  * spin_lock(&pidmap_lock) followed by an interrupt routine that does
81  * read_lock(&tasklist_lock);
82  *
83  * After we clean up the tasklist_lock and know there are no
84  * irq handlers that take it, we can leave interrupts enabled.
85  * For now it is easier to be safe than to prove it can't happen.
86  */
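/*
 * A minimal sketch of that deadlock (illustrative, not part of the original
 * source), assuming pidmap_lock were taken without disabling interrupts:
 *
 *	CPU 0 (detach_pid -> free_pid)		CPU 1 (e.g. alloc_pid)
 *	------------------------------		----------------------
 *	write_lock_irq(&tasklist_lock);
 *						spin_lock(&pidmap_lock);
 *	spin_lock(&pidmap_lock);
 *	  ... spins, CPU 1 holds it ...		<interrupt>
 *						  read_lock(&tasklist_lock);
 *						  ... spins, CPU 0 holds it ...
 *
 * Each CPU then waits forever on a lock the other holds, which is why
 * pidmap_lock is always taken with interrupts disabled here.
 */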
87 
88 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
89 
90 static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
91 {
92 	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
93 	int offset = pid & BITS_PER_PAGE_MASK;
94 
95 	clear_bit(offset, map->page);
96 	atomic_inc(&map->nr_free);
97 }
98 
99 static int alloc_pidmap(struct pid_namespace *pid_ns)
100 {
101 	int i, offset, max_scan, pid, last = pid_ns->last_pid;
102 	struct pidmap *map;
103 
104 	pid = last + 1;
105 	if (pid >= pid_max)
106 		pid = RESERVED_PIDS;
107 	offset = pid & BITS_PER_PAGE_MASK;
108 	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
109 	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
110 	for (i = 0; i <= max_scan; ++i) {
111 		if (unlikely(!map->page)) {
112 			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
113 			/*
114 			 * Free the page if someone raced with us
115 			 * installing it:
116 			 */
117 			spin_lock_irq(&pidmap_lock);
118 			if (map->page)
119 				kfree(page);
120 			else
121 				map->page = page;
122 			spin_unlock_irq(&pidmap_lock);
123 			if (unlikely(!map->page))
124 				break;
125 		}
126 		if (likely(atomic_read(&map->nr_free))) {
127 			do {
128 				if (!test_and_set_bit(offset, map->page)) {
129 					atomic_dec(&map->nr_free);
130 					pid_ns->last_pid = pid;
131 					return pid;
132 				}
133 				offset = find_next_offset(map, offset);
134 				pid = mk_pid(pid_ns, map, offset);
135 			/*
136 			 * find_next_offset() found a bit, the pid from it
137 			 * is in-bounds, and if we fell back to the last
138 			 * bitmap block and the final block was the same
139 			 * as the starting point, pid is before last_pid.
140 			 */
141 			} while (offset < BITS_PER_PAGE && pid < pid_max &&
142 					(i != max_scan || pid < last ||
143 					    !((last+1) & BITS_PER_PAGE_MASK)));
144 		}
145 		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
146 			++map;
147 			offset = 0;
148 		} else {
149 			map = &pid_ns->pidmap[0];
150 			offset = RESERVED_PIDS;
151 			if (unlikely(last == offset))
152 				break;
153 		}
154 		pid = mk_pid(pid_ns, map, offset);
155 	}
156 	return -1;
157 }
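/*
 * Scan-size note (illustrative, not part of the original source), assuming
 * 4 KiB pages: with the default pid_max of 32768 the whole PID space fits
 * in a single bitmap page, so
 *
 *	max_scan = (32768 + 32767) / 32768 - !offset
 *
 * is at most 1, i.e. one pass over the page plus (when offset != 0) a
 * second pass after wrapping back to offset RESERVED_PIDS.
 */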
158 
159 static int next_pidmap(struct pid_namespace *pid_ns, int last)
160 {
161 	int offset;
162 	struct pidmap *map, *end;
163 
164 	offset = (last + 1) & BITS_PER_PAGE_MASK;
165 	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
166 	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
167 	for (; map < end; map++, offset = 0) {
168 		if (unlikely(!map->page))
169 			continue;
170 		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
171 		if (offset < BITS_PER_PAGE)
172 			return mk_pid(pid_ns, map, offset);
173 	}
174 	return -1;
175 }
176 
177 fastcall void put_pid(struct pid *pid)
178 {
179 	if (!pid)
180 		return;
181 	if ((atomic_read(&pid->count) == 1) ||
182 	     atomic_dec_and_test(&pid->count))
183 		kmem_cache_free(pid_cachep, pid);
184 }
185 EXPORT_SYMBOL_GPL(put_pid);
186 
187 static void delayed_put_pid(struct rcu_head *rhp)
188 {
189 	struct pid *pid = container_of(rhp, struct pid, rcu);
190 	put_pid(pid);
191 }
192 
193 fastcall void free_pid(struct pid *pid)
194 {
195 	/* We can be called with write_lock_irq(&tasklist_lock) held */
196 	unsigned long flags;
197 
198 	spin_lock_irqsave(&pidmap_lock, flags);
199 	hlist_del_rcu(&pid->pid_chain);
200 	spin_unlock_irqrestore(&pidmap_lock, flags);
201 
202 	free_pidmap(&init_pid_ns, pid->nr);
203 	call_rcu(&pid->rcu, delayed_put_pid);
204 }
205 
206 struct pid *alloc_pid(void)
207 {
208 	struct pid *pid;
209 	enum pid_type type;
210 	int nr = -1;
211 
212 	pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
213 	if (!pid)
214 		goto out;
215 
216 	nr = alloc_pidmap(current->nsproxy->pid_ns);
217 	if (nr < 0)
218 		goto out_free;
219 
220 	atomic_set(&pid->count, 1);
221 	pid->nr = nr;
222 	for (type = 0; type < PIDTYPE_MAX; ++type)
223 		INIT_HLIST_HEAD(&pid->tasks[type]);
224 
225 	spin_lock_irq(&pidmap_lock);
226 	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
227 	spin_unlock_irq(&pidmap_lock);
228 
229 out:
230 	return pid;
231 
232 out_free:
233 	kmem_cache_free(pid_cachep, pid);
234 	pid = NULL;
235 	goto out;
236 }
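/*
 * Rough lifecycle sketch (illustrative, not part of the original source;
 * the fork path does something along these lines, error handling omitted):
 *
 *	struct pid *pid = alloc_pid();		 may return NULL
 *	...
 *	write_lock_irq(&tasklist_lock);
 *	attach_pid(task, PIDTYPE_PID, pid);
 *	write_unlock_irq(&tasklist_lock);
 *	...
 *	detach_pid(task, PIDTYPE_PID);		 last user gone -> free_pid()
 */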
237 
238 struct pid * fastcall find_pid(int nr)
239 {
240 	struct hlist_node *elem;
241 	struct pid *pid;
242 
243 	hlist_for_each_entry_rcu(pid, elem,
244 			&pid_hash[pid_hashfn(nr)], pid_chain) {
245 		if (pid->nr == nr)
246 			return pid;
247 	}
248 	return NULL;
249 }
250 EXPORT_SYMBOL_GPL(find_pid);
251 
252 /*
253  * attach_pid() must be called with the tasklist_lock write-held.
254  */
255 int fastcall attach_pid(struct task_struct *task, enum pid_type type,
256 		struct pid *pid)
257 {
258 	struct pid_link *link;
259 
260 	link = &task->pids[type];
261 	link->pid = pid;
262 	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
263 
264 	return 0;
265 }
266 
267 void fastcall detach_pid(struct task_struct *task, enum pid_type type)
268 {
269 	struct pid_link *link;
270 	struct pid *pid;
271 	int tmp;
272 
273 	link = &task->pids[type];
274 	pid = link->pid;
275 
276 	hlist_del_rcu(&link->node);
277 	link->pid = NULL;
278 
279 	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
280 		if (!hlist_empty(&pid->tasks[tmp]))
281 			return;
282 
283 	free_pid(pid);
284 }
285 
286 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
287 void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
288 			   enum pid_type type)
289 {
290 	new->pids[type].pid = old->pids[type].pid;
291 	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
292 	old->pids[type].pid = NULL;
293 }
294 
295 struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
296 {
297 	struct task_struct *result = NULL;
298 	if (pid) {
299 		struct hlist_node *first;
300 		first = rcu_dereference(pid->tasks[type].first);
301 		if (first)
302 			result = hlist_entry(first, struct task_struct, pids[(type)].node);
303 	}
304 	return result;
305 }
306 
307 /*
308  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
309  */
310 struct task_struct *find_task_by_pid_type(int type, int nr)
311 {
312 	return pid_task(find_pid(nr), type);
313 }
314 
315 EXPORT_SYMBOL(find_task_by_pid_type);
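/*
 * Usage sketch (illustrative, not part of the original source): look the
 * task up under RCU and pin it with a proper reference before leaving the
 * read-side critical section:
 *
 *	struct task_struct *task;
 *
 *	rcu_read_lock();
 *	task = find_task_by_pid_type(PIDTYPE_PID, nr);
 *	if (task)
 *		get_task_struct(task);
 *	rcu_read_unlock();
 *	...
 *	if (task)
 *		put_task_struct(task);
 */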
316 
317 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
318 {
319 	struct pid *pid;
320 	rcu_read_lock();
321 	pid = get_pid(task->pids[type].pid);
322 	rcu_read_unlock();
323 	return pid;
324 }
325 
326 struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
327 {
328 	struct task_struct *result;
329 	rcu_read_lock();
330 	result = pid_task(pid, type);
331 	if (result)
332 		get_task_struct(result);
333 	rcu_read_unlock();
334 	return result;
335 }
336 
337 struct pid *find_get_pid(pid_t nr)
338 {
339 	struct pid *pid;
340 
341 	rcu_read_lock();
342 	pid = get_pid(find_pid(nr));
343 	rcu_read_unlock();
344 
345 	return pid;
346 }
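/*
 * Typical pairing (illustrative, not part of the original source): the
 * counted reference returned here must be dropped with put_pid() once the
 * caller is done:
 *
 *	struct pid *pid = find_get_pid(nr);
 *	if (pid) {
 *		struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 *		if (task) {
 *			... use task ...
 *			put_task_struct(task);
 *		}
 *		put_pid(pid);
 *	}
 */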
347 
348 /*
349  * Used by proc to find the first pid that is greater than or equal to nr.
350  *
351  * If there is a pid at nr, this function is exactly the same as find_pid.
352  */
353 struct pid *find_ge_pid(int nr)
354 {
355 	struct pid *pid;
356 
357 	do {
358 		pid = find_pid(nr);
359 		if (pid)
360 			break;
361 		nr = next_pidmap(current->nsproxy->pid_ns, nr);
362 	} while (nr > 0);
363 
364 	return pid;
365 }
366 EXPORT_SYMBOL_GPL(find_get_pid);
367 
368 struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
369 {
370 	BUG_ON(!old_ns);
371 	get_pid_ns(old_ns);
372 	return old_ns;
373 }
374 
375 void free_pid_ns(struct kref *kref)
376 {
377 	struct pid_namespace *ns;
378 
379 	ns = container_of(kref, struct pid_namespace, kref);
380 	kfree(ns);
381 }
382 
383 /*
384  * The pid hash table is scaled according to the amount of memory in the
385  * machine, from a minimum of 16 slots up to 4096 slots at one gigabyte or
386  * more.
387  */
388 void __init pidhash_init(void)
389 {
390 	int i, pidhash_size;
391 	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
392 
393 	pidhash_shift = max(4, fls(megabytes * 4));
394 	pidhash_shift = min(12, pidhash_shift);
395 	pidhash_size = 1 << pidhash_shift;
396 
397 	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
398 		pidhash_size, pidhash_shift,
399 		pidhash_size * sizeof(struct hlist_head));
400 
401 	pid_hash = alloc_bootmem(pidhash_size *	sizeof(*(pid_hash)));
402 	if (!pid_hash)
403 		panic("Could not alloc pidhash!\n");
404 	for (i = 0; i < pidhash_size; i++)
405 		INIT_HLIST_HEAD(&pid_hash[i]);
406 }
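/*
 * Worked example (illustrative, not part of the original source): with
 * roughly 256 MB worth of kernel pages, megabytes == 256, so
 * fls(256 * 4) == fls(1024) == 11; clamped to the [4, 12] range this gives
 * pidhash_shift == 11, i.e. 2048 hash buckets (16 KiB of hlist heads with
 * 8-byte pointers).
 */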
407 
408 void __init pidmap_init(void)
409 {
410 	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
411 	/* Reserve PID 0. We never call free_pidmap(0) */
412 	set_bit(0, init_pid_ns.pidmap[0].page);
413 	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
414 
415 	pid_cachep = KMEM_CACHE(pid, SLAB_PANIC);
416 }
417