xref: /openbmc/linux/kernel/pid.c (revision f42b3800)
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is modified with the pidmap_lock held (interrupts disabled),
 * and lookups run under rcu_read_lock(), so no additional SMP locking
 * is needed here.
 *
 * We have a list of bitmap pages whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of the 1 million possible PIDs
 * are already allocated, is a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful
 * test_and_set_bit(). Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
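
/*
 * Illustrative sketch (not part of the algorithm): hash_long() folds
 * its argument with the golden-ratio multiplier and keeps the top
 * pidhash_shift bits, so a lookup of pid 1 in the initial namespace
 * would touch the bucket
 *
 *	&pid_hash[pid_hashfn(1, &init_pid_ns)];
 *
 * Adding the namespace pointer into the hash keeps the pid-1 entries
 * of different namespaces spread across (usually) different buckets.
 */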
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE * 8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap) * BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
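
/*
 * For scale (assuming 4 KiB pages and a 64-bit build): BITS_PER_PAGE is
 * 32768, so the default pid_max of 32768 fits in one bitmap page, while
 * the 4-million-PID limit needs all PIDMAP_ENTRIES (128) pages, which
 * are only allocated as the PID space actually grows.
 */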
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

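/*
 * Is @tsk the "init" task (pid 1) of the pid namespace it belongs to?
 * Done under rcu_read_lock() because the task's pid can be detached
 * and freed concurrently.
 */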
int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another CPU that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
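
/*
 * Concretely, the interleaving being avoided looks like this
 * (illustrative sketch):
 *
 *	CPU 0					CPU 1
 *	-----					-----
 *	write_lock_irq(&tasklist_lock);
 *						spin_lock(&pidmap_lock);
 *	detach_pid();
 *	  free_pid();
 *	    spin_lock(&pidmap_lock);		<interrupt>
 *	    ... spins, CPU 1 holds it ...	  read_lock(&tasklist_lock);
 *						  ... spins, CPU 0 holds it ...
 *
 * Disabling interrupts while pidmap_lock is held makes the right-hand
 * column impossible.
 */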

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

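/*
 * Allocate a pid number from @pid_ns's bitmap, scanning circularly
 * from just above last_pid and wrapping around to RESERVED_PIDS.
 * Returns the new pid number, or -1 when the namespace's PID space
 * is exhausted (or a bitmap page cannot be allocated).
 */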
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

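/*
 * Find the first allocated pid number greater than @last in @pid_ns,
 * or -1 if there is none.  find_ge_pid() below uses this to skip
 * holes in the PID space.
 */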
int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit(map->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

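/*
 * Drop a reference on @pid.  The atomic_read() below is a fast path:
 * when we hold the only reference, the pid can be freed without a
 * locked decrement; otherwise fall back to atomic_dec_and_test().
 */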
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	call_rcu(&pid->rcu, delayed_put_pid);
}

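/*
 * Allocate a struct pid and one pid number in every namespace from
 * @ns up to (and including) the initial namespace, then hash each
 * (nr, ns) pair.  On failure the pid numbers allocated so far are
 * given back and NULL is returned.
 */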
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

struct pid *find_pid(int nr)
{
	return find_pid_ns(nr, &init_pid_ns);
}
EXPORT_SYMBOL(find_pid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

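/*
 * Unhook @task from the @type list of its pid and, once no task uses
 * the pid in any capacity (PID, PGID, SID) anymore, free it.  Like
 * attach_pid(), this is called with the tasklist_lock write-held.
 */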
void detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct,
					pids[type].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}
EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_pid(pid_t nr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
}
EXPORT_SYMBOL(find_task_by_pid);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

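/*
 * Report the pid number @pid carries in namespace @ns, or 0 when @pid
 * has no mapping there (@ns is a descendant of, or unrelated to, the
 * namespace the pid was allocated in).
 */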
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pid(tsk), ns);
}
EXPORT_SYMBOL(task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pgrp(tsk), ns);
}
EXPORT_SYMBOL(task_pgrp_nr_ns);

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_session(tsk), ns);
}
EXPORT_SYMBOL(task_session_nr_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
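
/*
 * Typical use, a sketch of how a /proc-style walk over the PID space
 * could look (illustrative only, not lifted from fs/proc):
 *
 *	rcu_read_lock();
 *	for (pid = find_ge_pid(nr, ns); pid != NULL;
 *	     pid = find_ge_pid(nr + 1, ns))
 *		nr = pid_nr_ns(pid, ns);	(emit one entry per nr)
 *	rcu_read_unlock();
 */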

/*
 * The pid hash table is scaled according to the amount of memory in
 * the machine, from a minimum of 16 slots up to 4096 slots at one
 * gigabyte or more.
 */
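
/*
 * Worked example (assuming 4 KiB pages): with 1 GiB of kernel pages,
 * megabytes == 1024, so fls(1024 * 4) == fls(4096) == 13, which the
 * min() below clamps to 12: 2^12 == 4096 buckets, i.e. 32 KiB of
 * hlist_heads on a 64-bit machine.
 */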
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}