/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/sched/task.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
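
/*
 * Illustrative note (added for clarity, not part of the original source):
 * with 4 KiB pages BITS_PER_PAGE is 32768, so a pid's bitmap page and bit
 * offset are simply its quotient and remainder by BITS_PER_PAGE.  Assuming
 * pid_max has been raised above the default so that pid 40000 is reachable:
 *
 *	map    = &pid_ns->pidmap[40000 / BITS_PER_PAGE];	second page
 *	offset = 40000 & BITS_PER_PAGE_MASK;			7232
 *	mk_pid(pid_ns, map, offset);				1*32768 + 7232 == 40000
 */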

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
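
/*
 * Worked example (added for illustration, not part of the original source):
 * suppose this allocation started at base == 32000 with the default pid_max
 * of 32768, wrapped around and found pid == 350, while another CPU
 * concurrently stored 32500 in last_pid.  Then
 *
 *	pid_before(32000, 32500, 350)
 *		== (unsigned)(32500 - 32000) < (unsigned)(350 - 32000)
 *		== 500 < 4294935646
 *		== true
 *
 * so 350 is the "later" value with respect to base, and set_last_pid()
 * keeps retrying the cmpxchg() until 350 (or something even later) wins.
 */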

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				return -ENOMEM;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -EAGAIN;
}
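
/*
 * Illustrative note (added, not part of the original source): with the
 * default pid_max of 32768 and 4 KiB pages, BITS_PER_PAGE is also 32768,
 * so the whole PID space fits in pidmap[0] and
 *
 *	max_scan = DIV_ROUND_UP(32768, 32768) - !offset == 1 - !offset;
 *
 * i.e. when last_pid points into the middle of the page (offset != 0) the
 * loop above runs twice: once from 'offset' to the end of the page, and
 * once more from RESERVED_PIDS after wrapping, before returning -EAGAIN.
 */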

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch(--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
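
/*
 * Illustrative example (added for clarity, not in the original source): for
 * a task created in a pid namespace nested two levels below the initial
 * one, alloc_pid() fills pid->numbers[] from the innermost level outwards,
 * e.g. (the nr values are made up):
 *
 *	pid->level      == 2
 *	pid->numbers[2] == { .nr = 7,     .ns = the task's own ns }
 *	pid->numbers[1] == { .nr = 1042,  .ns = the middle ns     }
 *	pid->numbers[0] == { .nr = 23581, .ns = &init_pid_ns      }
 *
 * One struct pid thus carries one upid per namespace level, and every upid
 * is hashed into pid_hash so find_pid_ns() works at any of the levels.
 */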

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
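
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * reference-counted lookup helpers above are typically combined like this
 * from process context, with the caller dropping every reference it took:
 *
 *	struct pid *pid = find_get_pid(nr);
 *	struct task_struct *task;
 *
 *	if (pid) {
 *		task = get_pid_task(pid, PIDTYPE_PID);
 *		if (task) {
 *			... use task ...
 *			put_task_struct(task);
 *		}
 *		put_pid(pid);
 *	}
 */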

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
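
/*
 * Illustrative note (added, not part of the original source): assuming the
 * usual PIDS_PER_CPU_DEFAULT of 1024 and PIDS_PER_CPU_MIN of 8 from
 * <linux/threads.h>, a machine with 64 possible CPUs boots with
 *
 *	pid_max     = min(pid_max_max, max(32768, 1024 * 64)) == 65536
 *	pid_max_min = max(301, 8 * 64)                         == 512
 *
 * and pid_max can later be tuned within those bounds via
 * /proc/sys/kernel/pid_max.
 */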