// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one out of 1 million possible PIDs
 * are already allocated, requires scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it, we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

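/*
 * Illustrative interleaving of the deadlock described above (a sketch
 * added for clarity, not from the original source): without disabling
 * interrupts, two CPUs can wedge each other:
 *
 *	CPU 0					CPU 1
 *	-----					-----
 *	spin_lock(&pidmap_lock)			write_lock_irq(&tasklist_lock)
 *	<interrupt>				detach_pid()->free_pid()
 *	  read_lock(&tasklist_lock)		  spin_lock(&pidmap_lock)
 *	  (spins: CPU 1 holds it for write)	  (spins: CPU 0 holds it)
 *
 * Hence the _irqsave/_irq variants used on pidmap_lock throughout.
 */
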
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

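/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that needs a struct pid to stay valid outside of rcu_read_lock()
 * takes a reference and pairs it with put_pid():
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *
 *	... pid remains valid here, even if the task exits ...
 *	put_pid(pid);
 *
 * The final put_pid() frees the pid and drops the reference the pid
 * holds on its most-nested namespace, as seen above.
 */
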
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

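/*
 * Note on the switch above (added commentary): pid_allocated is a count
 * of live pids in the namespace, with PIDNS_ADDING, a single high flag
 * bit, ORed in while the namespace still accepts new pids; the
 * BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING) in pid_idr_init()
 * guarantees the flag can never collide with the count. Landing on
 * exactly PIDNS_ADDING after the decrement therefore means the last
 * pid went away while the flag was still set, i.e. the very first fork
 * into the namespace failed before a child reaper was installed.
 */
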
struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
		      size_t set_tid_size)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	/*
	 * set_tid_size contains the size of the set_tid array. Starting at
	 * the most nested currently active PID namespace, it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace,
	 * up to set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces, but set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
	if (set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int tid = 0;

		if (set_tid_size) {
			tid = set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max)
				goto out_free;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_free;
			retval = -EPERM;
			if (!ns_capable(tmp->user_ns, CAP_SYS_ADMIN))
				goto out_free;
			set_tid_size--;
		}

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max, GFP_ATOMIC);
		}
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}

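/*
 * Indexing example for set_tid (added commentary): the loop above reads
 * set_tid[ns->level - i] while i walks from ns->level down to 0, so for
 * a clone into a namespace at level 2 with set_tid_size == 3:
 *
 *	set_tid[0] -> requested PID in the level-2 (most nested) namespace
 *	set_tid[1] -> requested PID in its level-1 parent
 *	set_tid[2] -> requested PID in the level-0 init namespace
 *
 * With a smaller set_tid_size only the most nested namespaces receive
 * explicit PIDs; the remaining, outer levels fall back to cyclic
 * allocation.
 */
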
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

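/*
 * Lookup sketch (illustrative): find_pid_ns() and find_vpid() return
 * the pid without taking a reference, so the result is only stable
 * under rcu_read_lock(). A caller that needs the pid afterwards pins
 * it, exactly as find_get_pid() does further down:
 *
 *	rcu_read_lock();
 *	pid = get_pid(find_vpid(nr));
 *	rcu_read_unlock();
 */
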
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

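/*
 * Usage sketch (illustrative): unlike find_task_by_vpid(), the _get_
 * variant above hands back a referenced task that stays valid outside
 * of rcu_read_lock(), so the caller pairs it with put_task_struct():
 *
 *	struct task_struct *tsk = find_get_task_by_vpid(nr);
 *
 *	if (tsk) {
 *		... may sleep while using tsk ...
 *		put_task_struct(tsk);
 *	}
 */
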
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

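/*
 * Translation example (added commentary): a pid allocated at level 2
 * carries three upid entries, numbers[0..2], one per ancestor
 * namespace. For an observer namespace obs_ns, pid_nr_ns(pid, obs_ns)
 * returns numbers[obs_ns->level].nr when obs_ns is one of those
 * ancestors, and 0 when the pid is not visible from obs_ns (a sibling
 * namespace, or one nested deeper than the pid itself).
 */
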
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task)))
		nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr, this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}

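/*
 * Iteration sketch (illustrative): walking every pid in a namespace
 * the way procfs does, under rcu_read_lock():
 *
 *	int nr = 0;
 *	struct pid *pid;
 *
 *	while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *		... examine pid ...
 *		nr = pid_nr_ns(pid, ns) + 1;
 *	}
 */
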
/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:  struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid)
{
	int fd;

	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
			      O_RDWR | O_CLOEXEC);
	if (fd < 0)
		put_pid(pid);

	return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	if (pid_has_task(p, PIDTYPE_TGID))
		fd = pidfd_create(p);
	else
		fd = -EINVAL;

	put_pid(p);
	return fd;
}

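/*
 * Userspace usage sketch (illustrative; assumes a libc without a
 * pidfd_open() wrapper, hence the raw syscall):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *
 *	if (pidfd < 0)
 *		perror("pidfd_open");
 *
 * The returned descriptor is O_CLOEXEC as documented above and is
 * pollable: it becomes readable once the target process exits.
 */
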
void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
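
/*
 * Scaling example (added commentary; assumes the <linux/threads.h>
 * values PIDS_PER_CPU_DEFAULT == 1024 and PIDS_PER_CPU_MIN == 8, with
 * PID_MAX_DEFAULT == 32768 and PID_MAX_LIMIT == 4194304): on a 64-CPU
 * machine pid_idr_init() picks
 *
 *	pid_max     = min(4194304, max(32768, 64 * 1024)) = 65536
 *	pid_max_min = max(301, 64 * 8)                     = 512
 *
 * so larger machines get proportionally more PID space by default.
 */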
581