xref: /openbmc/linux/kernel/pid.c (revision ca79522c)
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst case,
 * when all but one of 1 million possible PIDs are already allocated,
 * allocation scans 32 list entries and at most PAGE_SIZE bytes. The
 * typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */
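
/*
 * To make the worst-case figure above concrete (assuming 4K pages):
 * BITS_PER_PAGE is 8 * PAGE_SIZE = 32768 bits, so covering 2^20 (about
 * 1 million) PIDs takes 1048576 / 32768 = 32 bitmap pages, which is
 * where the "32 list entries" come from, and a full scan of one bitmap
 * touches at most PAGE_SIZE bytes.
 */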

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
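/*
 * For illustration: with the maximum pidhash_shift of 12 (4096 buckets,
 * see pidhash_init() below), pid_hashfn(nr, ns) folds the pid number
 * and the namespace pointer into a bucket index in [0, 4095], so the
 * same nr hashed from two different namespaces usually lands on
 * different chains.
 */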
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
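
/*
 * For illustration (4K pages, so BITS_PER_PAGE == 32768): the bit at
 * offset 100 in the second bitmap page (map - pid_ns->pidmap == 1)
 * represents pid 1 * 32768 + 100 = 32868.
 */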

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
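
/*
 * Illustrative interleave of that deadlock: CPU0 takes pidmap_lock and
 * is interrupted; its irq handler spins in read_lock(&tasklist_lock)
 * behind CPU1, which already write-holds tasklist_lock and is itself
 * spinning on pidmap_lock in detach_pid()->free_pid(). Neither side
 * can make progress, hence the _irq lock variants below.
 */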

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base) mod 2^32 < (b - base) mod 2^32
	 *
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
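
/*
 * For illustration: after a rollover, pid_before(30000, 100, 31000)
 * returns 0, since (unsigned)(100 - 30000) wraps to a huge value while
 * (unsigned)(31000 - 30000) is only 1000. Counting up from 30000 we
 * reach 31000 first; the wrapped-around 100 comes later.
 */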

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids roll over, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
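
/*
 * Illustrative race: we observed base == 500 and allocated pid == 502,
 * but another cpu has meanwhile set last_pid to 501. The first
 * cmpxchg(&last_pid, 500, 502) fails and returns 501; since
 * pid_before(500, 501, 502) is true, our 502 is still the later value,
 * so we retry and cmpxchg(&last_pid, 501, 502) succeeds.
 */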

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice; the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
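
/*
 * For illustration of the scan-twice comment above: with the default
 * pid_max of 32768 on a 4K-page box there is a single bitmap page, and
 * starting from e.g. last_pid == 20000 gives max_scan == 1, so the
 * loop first scans offsets 20001..32767 and then, after wrapping,
 * scans the same page again from RESERVED_PIDS.
 */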

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}
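
/*
 * For illustration, in a hypothetical namespace in which only pid 1 is
 * live: next_pidmap(ns, 0) returns 1, and next_pidmap(ns, 1) returns
 * -1 because no higher bit is set in any pidmap page.
 */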

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	/*
	 * If we hold the only reference (count == 1), nobody else can
	 * legitimately get_pid() it concurrently, so we may free
	 * without the atomic decrement.
	 */
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch (--ns->nr_hashed) {
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
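
/*
 * For illustration: for a task created in a namespace of level 2, the
 * loop above allocates three numbers, e.g. (values hypothetical)
 * numbers[2] = {2, ns}, numbers[1] = {5, ns->parent} and
 * numbers[0] = {1387, &init_pid_ns}, so the one struct pid is visible
 * under a different nr in each namespace along the chain.
 */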

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);
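
/*
 * Illustrative lookup under RCU (essentially what find_get_pid() below
 * does, but against an explicit namespace); error handling elided:
 *
 *	rcu_read_lock();
 *	pid = get_pid(find_pid_ns(nr, ns));
 *	rcu_read_unlock();
 */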

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
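
/*
 * Illustrative caller pattern: the lookup must run under
 * rcu_read_lock(), and the result must be pinned before the lock is
 * dropped if it is used afterwards (compare get_pid_task() below):
 *
 *	rcu_read_lock();
 *	task = find_task_by_vpid(nr);
 *	if (task)
 *		get_task_struct(task);
 *	rcu_read_unlock();
 */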

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
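
/*
 * For illustration (numbers hypothetical): if a task in a level-1
 * namespace has numbers[0] = {1234, &init_pid_ns} and
 * numbers[1] = {2, child_ns}, then pid_nr_ns() returns 1234 when asked
 * from init_pid_ns, 2 when asked from child_ns, and 0 from any
 * unrelated namespace.
 */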

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr, this function is exactly the same as
 * find_pid_ns().
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine, from a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
	init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
602