/* /openbmc/linux/kernel/pid.c (revision d0b73b48) */
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, costs a scan of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */
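
/*
 * Worked example of the numbers above (a sketch assuming 4 KiB pages,
 * not part of the original file): one bitmap page covers
 * PAGE_SIZE * 8 = 32768 PIDs, so a 1-million-PID space (2^20) spans
 * 2^20 / 2^15 = 32 bitmap pages, which is exactly the "32 list
 * entries" scanned in the worst case.
 */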

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
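
/*
 * Example (assuming BITS_PER_PAGE == 32768, i.e. 4 KiB pages): bit
 * offset 4464 in the third bitmap page, map == &pid_ns->pidmap[2],
 * yields pid = 2 * 32768 + 4464 = 70000.
 */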

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, yet
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
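
/*
 * Example (again assuming BITS_PER_PAGE == 32768): freeing nr == 70000
 * selects pidmap[70000 / 32768] == pidmap[2] and clears bit
 * 70000 & 32767 == 4464, the exact inverse of mk_pid() above.
 */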

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
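
/*
 * A minimal user-space sketch of the same wrap-aware comparison
 * (hypothetical demo, not part of this file; assumes 32-bit unsigned):
 *
 *	#include <assert.h>
 *
 *	static int before(int base, int a, int b)
 *	{
 *		return (unsigned)(a - base) < (unsigned)(b - base);
 *	}
 *
 *	int main(void)
 *	{
 *		assert(before(300, 301, 302));	   // no wrap: 301 comes first
 *		assert(before(32760, 32767, 305)); // 305 was reached after a wrap
 *		assert(!before(32760, 305, 32767));
 *		return 0;
 *	}
 */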

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids roll over, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
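
/*
 * A user-space sketch of the same lost-update guard using C11 atomics
 * (hypothetical demo, not part of this file):
 *
 *	#include <stdatomic.h>
 *
 *	static atomic_int last_pid;
 *
 *	static void set_last(int base, int pid)
 *	{
 *		int prev = base;
 *
 *		// On failure, compare_exchange reloads 'prev' with the
 *		// current last_pid; retry only while our 'pid' is still
 *		// the later value relative to 'base'.
 *		while (!atomic_compare_exchange_strong(&last_pid, &prev, pid) &&
 *		       (unsigned)(prev - base) < (unsigned)(pid - base))
 *			;
 *	}
 *
 * The loop gives up as soon as another CPU has stored a value that
 * comes after 'pid', which is exactly the "winner has the later value"
 * rule described above.
 */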

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			} while (offset < BITS_PER_PAGE && pid < pid_max);
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
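
/*
 * Example walk-through (assuming pid_max == 32768, i.e. one bitmap
 * page): with last == 32767, pid becomes 32768 >= pid_max and restarts
 * at RESERVED_PIDS (300); offset == 300 and max_scan == 1, so the
 * single page is scanned at most twice from bit 300 upward before the
 * loop is exhausted and -1 is returned.
 */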

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch(--ns->nr_hashed) {
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
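
/*
 * Example: for a task created in a level-2 namespace, the allocation
 * loop above runs i = 2, 1, 0 and fills pid->numbers[2] (the child
 * ns), numbers[1] (its parent) and numbers[0] (init_pid_ns), each
 * with its own bitmap-allocated nr, so one struct pid carries one
 * pid number per namespace it is visible in.
 */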

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
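
/*
 * Example: a pid allocated at level 2 has numbers[0..2]. Asking from
 * its level-1 ancestor namespace returns numbers[1].nr; asking from an
 * unrelated level-1 namespace fails the upid->ns == ns check and
 * returns 0; a level-3 namespace (deeper than the pid) already fails
 * the ns->level <= pid->level test.
 */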

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}
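
/*
 * Example of the sizing above: the initial pidhash_shift of 4 gives
 * 1U << 4 == 16 buckets; alloc_large_system_hash() may raise the
 * shift with available memory, capped by the 4096-entry (shift 12)
 * high limit passed in.
 */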

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
	init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
601