xref: /openbmc/linux/kernel/pid.c (revision 615c36f5)
1 /*
2  * Generic pidhash and scalable, time-bounded PID allocator
3  *
4  * (C) 2002-2003 William Irwin, IBM
5  * (C) 2004 William Irwin, Oracle
6  * (C) 2002-2004 Ingo Molnar, Red Hat
7  *
8  * pid-structures are backing objects for tasks sharing a given ID to chain
9  * against. There is very little to them aside from hashing them and
10  * parking tasks using given IDs on a list.
11  *
12  * The hash is only modified with the pidmap_lock held, and lookups are
13  * done under rcu_read_lock() (see find_pid_ns()), so no additional SMP
14  * locking is needed here.
15  *
16  * We have a list of bitmap pages, whose bitmaps represent the PID space.
17  * Allocating and freeing PIDs is completely lockless. The worst case,
18  * when all but one of roughly 1 million possible PIDs are already
19  * allocated, is a scan of 32 list entries and of at most PAGE_SIZE bytes.
20  * The typical fast path is a single successful set_bit(). Freeing is O(1).
21  *
22  * Pid namespaces:
23  *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
24  *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
25  *     Many thanks to Oleg Nesterov for comments and help
26  *
27  */
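/*
 * Rough arithmetic behind the worst case above, assuming 4 KiB pages:
 * BITS_PER_PAGE is then 32768, so roughly 1 million pids span about 32
 * bitmap pages (the "32 list entries"), and scanning one nearly full
 * bitmap page touches at most PAGE_SIZE bytes of bitmap.
 */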
28 
29 #include <linux/mm.h>
30 #include <linux/export.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/rculist.h>
34 #include <linux/bootmem.h>
35 #include <linux/hash.h>
36 #include <linux/pid_namespace.h>
37 #include <linux/init_task.h>
38 #include <linux/syscalls.h>
39 
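/*
 * Hash both the pid number and the owning namespace pointer, so the same
 * numeric pid in two different namespaces normally lands in different
 * buckets; lookups compare both nr and ns (see find_pid_ns()).
 */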
40 #define pid_hashfn(nr, ns)	\
41 	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
42 static struct hlist_head *pid_hash;
43 static unsigned int pidhash_shift = 4;
44 struct pid init_struct_pid = INIT_STRUCT_PID;
45 
46 int pid_max = PID_MAX_DEFAULT;
47 
48 #define RESERVED_PIDS		300
49 
50 int pid_max_min = RESERVED_PIDS + 1;
51 int pid_max_max = PID_MAX_LIMIT;
52 
53 #define BITS_PER_PAGE		(PAGE_SIZE*8)
54 #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
55 
56 static inline int mk_pid(struct pid_namespace *pid_ns,
57 		struct pidmap *map, int off)
58 {
59 	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
60 }
61 
62 #define find_next_offset(map, off)					\
63 		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
64 
65 /*
66  * PID-map pages start out as NULL; they are allocated on first use and
67  * never freed. This way a low pid_max value does not cause lots of
68  * bitmaps to be allocated, while the scheme still scales up to
69  * 4 million PIDs at runtime.
70  */
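/*
 * For example, with 4 KiB pages BITS_PER_PAGE is 32768, so the usual
 * default pid_max of 32768 fits entirely in pidmap[0] and the remaining
 * PIDMAP_ENTRIES never get a page allocated.
 */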
71 struct pid_namespace init_pid_ns = {
72 	.kref = {
73 		.refcount       = ATOMIC_INIT(2),
74 	},
75 	.pidmap = {
76 		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
77 	},
78 	.last_pid = 0,
79 	.level = 0,
80 	.child_reaper = &init_task,
81 };
82 EXPORT_SYMBOL_GPL(init_pid_ns);
83 
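/*
 * Return 1 if @tsk is the "init" task of its pid namespace, i.e. it has
 * pid 1 when viewed from its own (deepest) namespace.
 */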
84 int is_container_init(struct task_struct *tsk)
85 {
86 	int ret = 0;
87 	struct pid *pid;
88 
89 	rcu_read_lock();
90 	pid = task_pid(tsk);
91 	if (pid != NULL && pid->numbers[pid->level].nr == 1)
92 		ret = 1;
93 	rcu_read_unlock();
94 
95 	return ret;
96 }
97 EXPORT_SYMBOL(is_container_init);
98 
99 /*
100  * Note: disable interrupts while the pidmap_lock is held as an
101  * interrupt might come in and do read_lock(&tasklist_lock).
102  *
103  * If we don't disable interrupts, there is a nasty deadlock between
104  * detach_pid()->free_pid() and another CPU that does
105  * spin_lock(&pidmap_lock) followed by an interrupt routine that does
106  * read_lock(&tasklist_lock).
107  *
108  * Once tasklist_lock usage is cleaned up and we know that no irq
109  * handlers take it, we can leave interrupts enabled here.
110  * For now it is easier to be safe than to prove it can't happen.
111  */
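/*
 * Concretely, a sketch of the feared interleaving: CPU A takes
 * pidmap_lock without disabling interrupts and is then interrupted by a
 * handler that does read_lock(&tasklist_lock).  Meanwhile CPU B already
 * holds tasklist_lock for writing and calls detach_pid()->free_pid(),
 * which spins on pidmap_lock.  A's handler waits for B's write lock, B
 * waits for A's pidmap_lock, and A cannot release it until its handler
 * returns: deadlock.
 */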
112 
113 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
114 
115 static void free_pidmap(struct upid *upid)
116 {
117 	int nr = upid->nr;
118 	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
119 	int offset = nr & BITS_PER_PAGE_MASK;
120 
121 	clear_bit(offset, map->page);
122 	atomic_inc(&map->nr_free);
123 }
124 
125 /*
126  * If we started walking pids at 'base', is 'a' seen before 'b'?
127  */
128 static int pid_before(int base, int a, int b)
129 {
130 	/*
131 	 * This is the same as saying
132 	 *
133 	 * (a - base) mod 2^32  <  (b - base) mod 2^32
134 	 * and that this mapping orders 'a' and 'b' with respect to 'base'.
135 	 */
136 	return (unsigned)(a - base) < (unsigned)(b - base);
137 }
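/*
 * For example, pid_before(5000, 6000, 300) is true: starting at 5000 we
 * reach 6000 before wrapping around and only meet 300 after the wrap;
 * the unsigned subtraction makes values below 'base' compare as larger.
 */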
138 
139 /*
140  * We might be racing with someone else trying to set pid_ns->last_pid.
141  * We want the winner to have the "later" value, because if the
142  * "earlier" value prevails, then a pid may get reused immediately.
143  *
144  * Since pids roll over, it is not sufficient to just pick the bigger
145  * value.  We have to consider where we started counting from.
146  *
147  * 'base' is the value of pid_ns->last_pid that we observed when
148  * we started looking for a pid.
149  *
150  * 'pid' is the pid that we eventually found.
151  */
152 static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
153 {
154 	int prev;
155 	int last_write = base;
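	/*
	 * Retry only while the cmpxchg lost (prev != last_write) and the
	 * value some other CPU installed is still earlier than our pid;
	 * losing to a later value means there is nothing left to do.
	 */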
156 	do {
157 		prev = last_write;
158 		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
159 	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
160 }
161 
162 static int alloc_pidmap(struct pid_namespace *pid_ns)
163 {
164 	int i, offset, max_scan, pid, last = pid_ns->last_pid;
165 	struct pidmap *map;
166 
167 	pid = last + 1;
168 	if (pid >= pid_max)
169 		pid = RESERVED_PIDS;
170 	offset = pid & BITS_PER_PAGE_MASK;
171 	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
172 	/*
173 	 * If last_pid points into the middle of the map->page we
174 	 * want to scan this bitmap block twice, the second time
175 	 * we start with offset == 0 (or RESERVED_PIDS).
176 	 */
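	/*
	 * The loop below runs max_scan + 1 times: one pass per bitmap block
	 * covering [0, pid_max), plus one extra pass over the starting block
	 * when offset is non-zero; the "- !offset" drops that extra pass
	 * when we already start at bit 0.
	 */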
177 	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
178 	for (i = 0; i <= max_scan; ++i) {
179 		if (unlikely(!map->page)) {
180 			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
181 			/*
182 			 * Free the page if someone raced with us
183 			 * installing it:
184 			 */
185 			spin_lock_irq(&pidmap_lock);
186 			if (!map->page) {
187 				map->page = page;
188 				page = NULL;
189 			}
190 			spin_unlock_irq(&pidmap_lock);
191 			kfree(page);
192 			if (unlikely(!map->page))
193 				break;
194 		}
195 		if (likely(atomic_read(&map->nr_free))) {
196 			do {
197 				if (!test_and_set_bit(offset, map->page)) {
198 					atomic_dec(&map->nr_free);
199 					set_last_pid(pid_ns, last, pid);
200 					return pid;
201 				}
202 				offset = find_next_offset(map, offset);
203 				pid = mk_pid(pid_ns, map, offset);
204 			} while (offset < BITS_PER_PAGE && pid < pid_max);
205 		}
206 		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
207 			++map;
208 			offset = 0;
209 		} else {
210 			map = &pid_ns->pidmap[0];
211 			offset = RESERVED_PIDS;
212 			if (unlikely(last == offset))
213 				break;
214 		}
215 		pid = mk_pid(pid_ns, map, offset);
216 	}
217 	return -1;
218 }
219 
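/*
 * Return the first pid number greater than @last that is currently in
 * use in @pid_ns (its bit is set in the pidmap), or -1 if there is none.
 * Used by find_ge_pid() below.
 */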
220 int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
221 {
222 	int offset;
223 	struct pidmap *map, *end;
224 
225 	if (last >= PID_MAX_LIMIT)
226 		return -1;
227 
228 	offset = (last + 1) & BITS_PER_PAGE_MASK;
229 	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
230 	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
231 	for (; map < end; map++, offset = 0) {
232 		if (unlikely(!map->page))
233 			continue;
234 		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
235 		if (offset < BITS_PER_PAGE)
236 			return mk_pid(pid_ns, map, offset);
237 	}
238 	return -1;
239 }
240 
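/*
 * Drop a reference on @pid; when the last reference goes away the pid is
 * freed and its namespace reference is dropped.  The atomic_read() == 1
 * test below is a fast path that skips the atomic decrement when the
 * caller holds the only remaining reference.
 */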
241 void put_pid(struct pid *pid)
242 {
243 	struct pid_namespace *ns;
244 
245 	if (!pid)
246 		return;
247 
248 	ns = pid->numbers[pid->level].ns;
249 	if ((atomic_read(&pid->count) == 1) ||
250 	     atomic_dec_and_test(&pid->count)) {
251 		kmem_cache_free(ns->pid_cachep, pid);
252 		put_pid_ns(ns);
253 	}
254 }
255 EXPORT_SYMBOL_GPL(put_pid);
256 
257 static void delayed_put_pid(struct rcu_head *rhp)
258 {
259 	struct pid *pid = container_of(rhp, struct pid, rcu);
260 	put_pid(pid);
261 }
262 
263 void free_pid(struct pid *pid)
264 {
265 	/* We can be called with write_lock_irq(&tasklist_lock) held */
266 	int i;
267 	unsigned long flags;
268 
269 	spin_lock_irqsave(&pidmap_lock, flags);
270 	for (i = 0; i <= pid->level; i++)
271 		hlist_del_rcu(&pid->numbers[i].pid_chain);
272 	spin_unlock_irqrestore(&pidmap_lock, flags);
273 
274 	for (i = 0; i <= pid->level; i++)
275 		free_pidmap(pid->numbers + i);
276 
277 	call_rcu(&pid->rcu, delayed_put_pid);
278 }
279 
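/*
 * Allocate a pid that is visible in @ns and in all of its ancestor
 * namespaces: one pid number (struct upid) per level, allocated while
 * walking from @ns up towards the init namespace, after which each upid
 * is hashed so the pid can be found from within its own namespace.
 */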
280 struct pid *alloc_pid(struct pid_namespace *ns)
281 {
282 	struct pid *pid;
283 	enum pid_type type;
284 	int i, nr;
285 	struct pid_namespace *tmp;
286 	struct upid *upid;
287 
288 	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
289 	if (!pid)
290 		goto out;
291 
292 	tmp = ns;
293 	for (i = ns->level; i >= 0; i--) {
294 		nr = alloc_pidmap(tmp);
295 		if (nr < 0)
296 			goto out_free;
297 
298 		pid->numbers[i].nr = nr;
299 		pid->numbers[i].ns = tmp;
300 		tmp = tmp->parent;
301 	}
302 
303 	get_pid_ns(ns);
304 	pid->level = ns->level;
305 	atomic_set(&pid->count, 1);
306 	for (type = 0; type < PIDTYPE_MAX; ++type)
307 		INIT_HLIST_HEAD(&pid->tasks[type]);
308 
309 	upid = pid->numbers + ns->level;
310 	spin_lock_irq(&pidmap_lock);
311 	for ( ; upid >= pid->numbers; --upid)
312 		hlist_add_head_rcu(&upid->pid_chain,
313 				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
314 	spin_unlock_irq(&pidmap_lock);
315 
316 out:
317 	return pid;
318 
319 out_free:
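	/*
	 * 'i' is the level whose pidmap allocation failed; only levels
	 * i+1 .. ns->level were allocated (the loop above walks levels
	 * downwards), so free just those.
	 */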
320 	while (++i <= ns->level)
321 		free_pidmap(pid->numbers + i);
322 
323 	kmem_cache_free(ns->pid_cachep, pid);
324 	pid = NULL;
325 	goto out;
326 }
327 
328 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
329 {
330 	struct hlist_node *elem;
331 	struct upid *pnr;
332 
333 	hlist_for_each_entry_rcu(pnr, elem,
334 			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
335 		if (pnr->nr == nr && pnr->ns == ns)
336 			return container_of(pnr, struct pid,
337 					numbers[ns->level]);
338 
339 	return NULL;
340 }
341 EXPORT_SYMBOL_GPL(find_pid_ns);
342 
343 struct pid *find_vpid(int nr)
344 {
345 	return find_pid_ns(nr, current->nsproxy->pid_ns);
346 }
347 EXPORT_SYMBOL_GPL(find_vpid);
348 
349 /*
350  * attach_pid() must be called with the tasklist_lock write-held.
351  */
352 void attach_pid(struct task_struct *task, enum pid_type type,
353 		struct pid *pid)
354 {
355 	struct pid_link *link;
356 
357 	link = &task->pids[type];
358 	link->pid = pid;
359 	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
360 }
361 
362 static void __change_pid(struct task_struct *task, enum pid_type type,
363 			struct pid *new)
364 {
365 	struct pid_link *link;
366 	struct pid *pid;
367 	int tmp;
368 
369 	link = &task->pids[type];
370 	pid = link->pid;
371 
372 	hlist_del_rcu(&link->node);
373 	link->pid = new;
374 
375 	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
376 		if (!hlist_empty(&pid->tasks[tmp]))
377 			return;
378 
379 	free_pid(pid);
380 }
381 
382 void detach_pid(struct task_struct *task, enum pid_type type)
383 {
384 	__change_pid(task, type, NULL);
385 }
386 
387 void change_pid(struct task_struct *task, enum pid_type type,
388 		struct pid *pid)
389 {
390 	__change_pid(task, type, pid);
391 	attach_pid(task, type, pid);
392 }
393 
394 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
395 void transfer_pid(struct task_struct *old, struct task_struct *new,
396 			   enum pid_type type)
397 {
398 	new->pids[type].pid = old->pids[type].pid;
399 	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
400 }
401 
402 struct task_struct *pid_task(struct pid *pid, enum pid_type type)
403 {
404 	struct task_struct *result = NULL;
405 	if (pid) {
406 		struct hlist_node *first;
407 		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
408 					      lockdep_tasklist_lock_is_held());
409 		if (first)
410 			result = hlist_entry(first, struct task_struct, pids[(type)].node);
411 	}
412 	return result;
413 }
414 EXPORT_SYMBOL(pid_task);
415 
416 /*
417  * Must be called under rcu_read_lock().
418  */
419 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
420 {
421 	rcu_lockdep_assert(rcu_read_lock_held(),
422 			   "find_task_by_pid_ns() needs rcu_read_lock()"
423 			   " protection");
424 	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
425 }
426 
427 struct task_struct *find_task_by_vpid(pid_t vnr)
428 {
429 	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
430 }
431 
432 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
433 {
434 	struct pid *pid;
435 	rcu_read_lock();
436 	if (type != PIDTYPE_PID)
437 		task = task->group_leader;
438 	pid = get_pid(task->pids[type].pid);
439 	rcu_read_unlock();
440 	return pid;
441 }
442 EXPORT_SYMBOL_GPL(get_task_pid);
443 
444 struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
445 {
446 	struct task_struct *result;
447 	rcu_read_lock();
448 	result = pid_task(pid, type);
449 	if (result)
450 		get_task_struct(result);
451 	rcu_read_unlock();
452 	return result;
453 }
454 EXPORT_SYMBOL_GPL(get_pid_task);
455 
456 struct pid *find_get_pid(pid_t nr)
457 {
458 	struct pid *pid;
459 
460 	rcu_read_lock();
461 	pid = get_pid(find_vpid(nr));
462 	rcu_read_unlock();
463 
464 	return pid;
465 }
466 EXPORT_SYMBOL_GPL(find_get_pid);
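/*
 * Typical usage sketch (hypothetical caller, not part of this file):
 *
 *	struct pid *pid = find_get_pid(nr);		// reference taken
 *	struct task_struct *tsk = get_pid_task(pid, PIDTYPE_PID);
 *	if (tsk) {
 *		...					// use the task
 *		put_task_struct(tsk);
 *	}
 *	put_pid(pid);					// both handle NULL
 */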
467 
468 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
469 {
470 	struct upid *upid;
471 	pid_t nr = 0;
472 
473 	if (pid && ns->level <= pid->level) {
474 		upid = &pid->numbers[ns->level];
475 		if (upid->ns == ns)
476 			nr = upid->nr;
477 	}
478 	return nr;
479 }
480 
481 pid_t pid_vnr(struct pid *pid)
482 {
483 	return pid_nr_ns(pid, current->nsproxy->pid_ns);
484 }
485 EXPORT_SYMBOL_GPL(pid_vnr);
486 
487 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
488 			struct pid_namespace *ns)
489 {
490 	pid_t nr = 0;
491 
492 	rcu_read_lock();
493 	if (!ns)
494 		ns = current->nsproxy->pid_ns;
495 	if (likely(pid_alive(task))) {
496 		if (type != PIDTYPE_PID)
497 			task = task->group_leader;
498 		nr = pid_nr_ns(task->pids[type].pid, ns);
499 	}
500 	rcu_read_unlock();
501 
502 	return nr;
503 }
504 EXPORT_SYMBOL(__task_pid_nr_ns);
505 
506 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
507 {
508 	return pid_nr_ns(task_tgid(tsk), ns);
509 }
510 EXPORT_SYMBOL(task_tgid_nr_ns);
511 
512 struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
513 {
514 	return ns_of_pid(task_pid(tsk));
515 }
516 EXPORT_SYMBOL_GPL(task_active_pid_ns);
517 
518 /*
519  * Used by proc to find the first pid that is greater than or equal to nr.
520  *
521  * If there is a pid at nr, this function is exactly the same as find_pid_ns.
522  */
523 struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
524 {
525 	struct pid *pid;
526 
527 	do {
528 		pid = find_pid_ns(nr, ns);
529 		if (pid)
530 			break;
531 		nr = next_pidmap(ns, nr);
532 	} while (nr > 0);
533 
534 	return pid;
535 }
536 
537 /*
538  * The pid hash table is scaled according to the amount of memory in the
539  * machine: from a minimum of 16 slots up to 4096 slots at one gigabyte or
540  * more.
541  */
542 void __init pidhash_init(void)
543 {
544 	int i, pidhash_size;
545 
546 	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
547 					   HASH_EARLY | HASH_SMALL,
548 					   &pidhash_shift, NULL, 4096);
549 	pidhash_size = 1 << pidhash_shift;
550 
551 	for (i = 0; i < pidhash_size; i++)
552 		INIT_HLIST_HEAD(&pid_hash[i]);
553 }
554 
555 void __init pidmap_init(void)
556 {
557 	/* bump default and minimum pid_max based on number of cpus */
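	/*
	 * For example, assuming the usual PIDS_PER_CPU_DEFAULT of 1024, a
	 * 64-CPU machine bumps pid_max from its 32768 default to 65536
	 * (still capped by pid_max_max).
	 */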
558 	pid_max = min(pid_max_max, max_t(int, pid_max,
559 				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
560 	pid_max_min = max_t(int, pid_max_min,
561 				PIDS_PER_CPU_MIN * num_possible_cpus());
562 	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
563 
564 	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
565 	/* Reserve PID 0. We never call free_pidmap(0) */
566 	set_bit(0, init_pid_ns.pidmap[0].page);
567 	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
568 
569 	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
570 			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
571 }
572