xref: /openbmc/linux/kernel/pid.c (revision 22d55f02)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from parking tasks that use
 * a given ID on per-type hlists.
 *
 * Tasks are attached to and detached from their struct pid with the
 * tasklist_lock write-acquired, while lookups run under rcu_read_lock(),
 * so no additional SMP locking is needed here.
 *
 * PID numbers live in a per-namespace IDR: alloc_pid() hands them out
 * cyclically via idr_alloc_cyclic() under pidmap_lock, free_pid()
 * returns them via idr_remove(), and find_pid_ns() is an RCU-safe
 * idr_find(). The struct pid itself is freed only after an RCU grace
 * period, so lockless lookups never see a stale pointer.
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

struct pid init_struct_pid = {
	.count		= ATOMIC_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * The IDR's radix-tree nodes start out absent and are allocated on
 * first use, never up front.  This way a low pid_max value does not
 * cause lots of memory to be preallocated, yet the scheme scales at
 * runtime up to the full PID_MAX_LIMIT (4 million) PID space.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * Once tasklist_lock is cleaned up and we know that no irq handlers
 * take it, we can leave interrupts enabled.  For now it is easier to
 * be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

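/*
 * Editor's illustration, not part of the original file: the ABBA
 * deadlock the comment above describes, with hypothetical helpers
 * standing in for the real call paths.  CPU0 models
 * detach_pid()->free_pid(), which runs with tasklist_lock write-held;
 * CPU1 models a pidmap_lock user that left interrupts enabled.
 */
static void example_cpu0_free_path(void)
{
	write_lock_irq(&tasklist_lock);		/* A: tasklist_lock held */
	spin_lock(&pidmap_lock);		/* B: spins, CPU1 holds it */
	spin_unlock(&pidmap_lock);
	write_unlock_irq(&tasklist_lock);
}

static void example_cpu1_irq_path(void)
{
	spin_lock(&pidmap_lock);		/* B: pidmap_lock held */
	/*
	 * An interrupt arriving here whose handler does
	 * read_lock(&tasklist_lock) spins behind CPU0's write lock,
	 * while CPU0 spins on pidmap_lock: neither side can make
	 * progress.  Taking pidmap_lock with interrupts disabled
	 * (spin_lock_irq/spin_lock_irqsave) closes the window.
	 */
	spin_unlock(&pidmap_lock);
}
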
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	/*
	 * Fast path: if we hold the last reference there is no need
	 * for an atomic decrement before freeing.
	 */
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int pid_min = 1;

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		/*
		 * init really needs pid 1, but after reaching the maximum
		 * wrap back to RESERVED_PIDS
		 */
		if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
			pid_min = RESERVED_PIDS;

		/*
		 * Store a null pointer so find_pid_ns does not find
		 * a partially initialized PID (see below).
		 */
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
				      pid_max, GFP_ATOMIC);
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}

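/*
 * Editor's sketch, not part of the original file: the caller-side
 * pattern, loosely modeled on copy_process() in kernel/fork.c.
 * alloc_pid() hands back an ERR_PTR() on failure; the pid is released
 * through free_pid() once it is no longer attached to any task.
 */
static struct pid *example_alloc_for_fork(struct task_struct *p)
{
	struct pid *pid;

	pid = alloc_pid(task_active_pid_ns(p));
	if (IS_ERR(pid))
		return pid;	/* -ENOMEM, or -EAGAIN when the space is full */
	/* ... attach_pid(p, PIDTYPE_PID) etc. under tasklist_lock ... */
	return pid;
}
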
/*
 * Stop new pids from being allocated in this namespace; used on the
 * namespace teardown path (see zap_pid_ns_processes()).
 */
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

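/*
 * Editor's sketch, not part of the original file: find_vpid() takes no
 * reference, so its result is only stable inside an RCU read-side
 * section.  A caller that needs the pid afterwards pins it with
 * get_pid(), which is exactly the pattern find_get_pid() below wraps.
 */
static struct pid *example_pinned_lookup(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));	/* get_pid(NULL) is a safe no-op */
	rcu_read_unlock();

	return pid;	/* caller eventually drops it with put_pid() */
}
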
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

/*
 * Unhook @task from its pid for @type and point it at @new instead;
 * the old pid is freed once no task references it through any type.
 */
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

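/*
 * Editor's sketch, not part of the original file: how the
 * attach/detach/change family is used, loosely modeled on
 * set_special_pids() in kernel/sys.c.  All of these helpers run with
 * tasklist_lock write-held.
 */
static void example_set_special_pids(struct task_struct *tsk, struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	if (task_session(tsk) != pid)
		change_pid(tsk, PIDTYPE_SID, pid);
	if (task_pgrp(tsk) != pid)
		change_pid(tsk, PIDTYPE_PGID, pid);
	write_unlock_irq(&tasklist_lock);
}
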
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

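/*
 * Editor's sketch, not part of the original file: typical syscall-side
 * use of find_get_task_by_vpid() (kernel/ptrace.c uses the same
 * shape).  The reference must be dropped with put_task_struct().
 */
static int example_with_task(pid_t nr)
{
	struct task_struct *task;

	task = find_get_task_by_vpid(nr);
	if (!task)
		return -ESRCH;
	/* ... operate on the task; it cannot be freed under us ... */
	put_task_struct(task);
	return 0;
}
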
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

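/*
 * Editor's worked example, not part of the original file, with made-up
 * numbers: a pid at namespace level 2 carries one upid per level, say
 * nr 12345 at level 0 (init), 77 at level 1 and 3 at level 2.  Then:
 *
 *	pid_nr_ns(pid, &init_pid_ns)	-> 12345
 *	pid_nr_ns(pid, mid_ns)		-> 77
 *	pid_nr_ns(pid, leaf_ns)		-> 3	(pid_vnr() inside leaf_ns)
 *	pid_nr_ns(pid, deeper_ns)	-> 0	(ns->level > pid->level,
 *						   or a sibling namespace)
 */
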
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task)))
		nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}

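/*
 * Editor's sketch, not part of the original file: walking every pid in
 * a namespace with find_ge_pid(), advancing past the last number seen,
 * the way fs/proc iterates its entries.
 */
static void example_walk_pids(struct pid_namespace *ns)
{
	struct pid *pid;
	int nr = 0;

	rcu_read_lock();
	while ((pid = find_ge_pid(nr, ns)) != NULL) {
		/* ... inspect pid ... */
		nr = pid_nr_ns(pid, ns) + 1;
	}
	rcu_read_unlock();
}
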
void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
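
/*
 * Editor's worked example, not part of the original file, assuming the
 * usual constants from include/linux/threads.h (PID_MAX_DEFAULT =
 * 0x8000, PIDS_PER_CPU_DEFAULT = 1024, PIDS_PER_CPU_MIN = 8): with 64
 * possible CPUs,
 *
 *	pid_max     = min(4194304, max(32768, 1024 * 64)) = 65536
 *	pid_max_min = max(301, 8 * 64)                     = 512
 *
 * so larger machines get a wider default pid space, clamped to
 * PID_MAX_LIMIT.
 */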