1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/ipc/sem.c 4 * Copyright (C) 1992 Krishna Balasubramanian 5 * Copyright (C) 1995 Eric Schenk, Bruno Haible 6 * 7 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com> 8 * 9 * SMP-threaded, sysctl's added 10 * (c) 1999 Manfred Spraul <manfred@colorfullife.com> 11 * Enforced range limit on SEM_UNDO 12 * (c) 2001 Red Hat Inc 13 * Lockless wakeup 14 * (c) 2003 Manfred Spraul <manfred@colorfullife.com> 15 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net> 16 * Further wakeup optimizations, documentation 17 * (c) 2010 Manfred Spraul <manfred@colorfullife.com> 18 * 19 * support for audit of ipc object properties and permission changes 20 * Dustin Kirkland <dustin.kirkland@us.ibm.com> 21 * 22 * namespaces support 23 * OpenVZ, SWsoft Inc. 24 * Pavel Emelianov <xemul@openvz.org> 25 * 26 * Implementation notes: (May 2010) 27 * This file implements System V semaphores. 28 * 29 * User space visible behavior: 30 * - FIFO ordering for semop() operations (just FIFO, not starvation 31 * protection) 32 * - multiple semaphore operations that alter the same semaphore in 33 * one semop() are handled. 34 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and 35 * SETALL calls. 36 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO. 37 * - undo adjustments at process exit are limited to 0..SEMVMX. 38 * - namespace are supported. 39 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing 40 * to /proc/sys/kernel/sem. 41 * - statistics about the usage are reported in /proc/sysvipc/sem. 42 * 43 * Internals: 44 * - scalability: 45 * - all global variables are read-mostly. 46 * - semop() calls and semctl(RMID) are synchronized by RCU. 47 * - most operations do write operations (actually: spin_lock calls) to 48 * the per-semaphore array structure. 49 * Thus: Perfect SMP scaling between independent semaphore arrays. 50 * If multiple semaphores in one array are used, then cache line 51 * trashing on the semaphore array spinlock will limit the scaling. 52 * - semncnt and semzcnt are calculated on demand in count_semcnt() 53 * - the task that performs a successful semop() scans the list of all 54 * sleeping tasks and completes any pending operations that can be fulfilled. 55 * Semaphores are actively given to waiting tasks (necessary for FIFO). 56 * (see update_queue()) 57 * - To improve the scalability, the actual wake-up calls are performed after 58 * dropping all locks. (see wake_up_sem_queue_prepare()) 59 * - All work is done by the waker, the woken up task does not have to do 60 * anything - not even acquiring a lock or dropping a refcount. 61 * - A woken up task may not even touch the semaphore array anymore, it may 62 * have been destroyed already by a semctl(RMID). 63 * - UNDO values are stored in an array (one per process and per 64 * semaphore array, lazily allocated). For backwards compatibility, multiple 65 * modes for the UNDO variables are supported (per process, per thread) 66 * (see copy_semundo, CLONE_SYSVSEM) 67 * - There are two lists of the pending operations: a per-array list 68 * and per-semaphore list (stored in the array). This allows to achieve FIFO 69 * ordering without always scanning all pending operations. 70 * The worst-case behavior is nevertheless O(N^2) for N wakeups. 71 */ 72 73 #include <linux/compat.h> 74 #include <linux/slab.h> 75 #include <linux/spinlock.h> 76 #include <linux/init.h> 77 #include <linux/proc_fs.h> 78 #include <linux/time.h> 79 #include <linux/security.h> 80 #include <linux/syscalls.h> 81 #include <linux/audit.h> 82 #include <linux/capability.h> 83 #include <linux/seq_file.h> 84 #include <linux/rwsem.h> 85 #include <linux/nsproxy.h> 86 #include <linux/ipc_namespace.h> 87 #include <linux/sched/wake_q.h> 88 89 #include <linux/uaccess.h> 90 #include "util.h" 91 92 /* One semaphore structure for each semaphore in the system. */ 93 struct sem { 94 int semval; /* current value */ 95 /* 96 * PID of the process that last modified the semaphore. For 97 * Linux, specifically these are: 98 * - semop 99 * - semctl, via SETVAL and SETALL. 100 * - at task exit when performing undo adjustments (see exit_sem). 101 */ 102 struct pid *sempid; 103 spinlock_t lock; /* spinlock for fine-grained semtimedop */ 104 struct list_head pending_alter; /* pending single-sop operations */ 105 /* that alter the semaphore */ 106 struct list_head pending_const; /* pending single-sop operations */ 107 /* that do not alter the semaphore*/ 108 time64_t sem_otime; /* candidate for sem_otime */ 109 } ____cacheline_aligned_in_smp; 110 111 /* One sem_array data structure for each set of semaphores in the system. */ 112 struct sem_array { 113 struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ 114 time64_t sem_ctime; /* create/last semctl() time */ 115 struct list_head pending_alter; /* pending operations */ 116 /* that alter the array */ 117 struct list_head pending_const; /* pending complex operations */ 118 /* that do not alter semvals */ 119 struct list_head list_id; /* undo requests on this array */ 120 int sem_nsems; /* no. of semaphores in array */ 121 int complex_count; /* pending complex operations */ 122 unsigned int use_global_lock;/* >0: global lock required */ 123 124 struct sem sems[]; 125 } __randomize_layout; 126 127 /* One queue for each sleeping process in the system. */ 128 struct sem_queue { 129 struct list_head list; /* queue of pending operations */ 130 struct task_struct *sleeper; /* this process */ 131 struct sem_undo *undo; /* undo structure */ 132 struct pid *pid; /* process id of requesting process */ 133 int status; /* completion status of operation */ 134 struct sembuf *sops; /* array of pending operations */ 135 struct sembuf *blocking; /* the operation that blocked */ 136 int nsops; /* number of operations */ 137 bool alter; /* does *sops alter the array? */ 138 bool dupsop; /* sops on more than one sem_num */ 139 }; 140 141 /* Each task has a list of undo requests. They are executed automatically 142 * when the process exits. 143 */ 144 struct sem_undo { 145 struct list_head list_proc; /* per-process list: * 146 * all undos from one process 147 * rcu protected */ 148 struct rcu_head rcu; /* rcu struct for sem_undo */ 149 struct sem_undo_list *ulp; /* back ptr to sem_undo_list */ 150 struct list_head list_id; /* per semaphore array list: 151 * all undos for one array */ 152 int semid; /* semaphore set identifier */ 153 short *semadj; /* array of adjustments */ 154 /* one per semaphore */ 155 }; 156 157 /* sem_undo_list controls shared access to the list of sem_undo structures 158 * that may be shared among all a CLONE_SYSVSEM task group. 159 */ 160 struct sem_undo_list { 161 refcount_t refcnt; 162 spinlock_t lock; 163 struct list_head list_proc; 164 }; 165 166 167 #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) 168 169 static int newary(struct ipc_namespace *, struct ipc_params *); 170 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); 171 #ifdef CONFIG_PROC_FS 172 static int sysvipc_sem_proc_show(struct seq_file *s, void *it); 173 #endif 174 175 #define SEMMSL_FAST 256 /* 512 bytes on stack */ 176 #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ 177 178 /* 179 * Switching from the mode suitable for simple ops 180 * to the mode for complex ops is costly. Therefore: 181 * use some hysteresis 182 */ 183 #define USE_GLOBAL_LOCK_HYSTERESIS 10 184 185 /* 186 * Locking: 187 * a) global sem_lock() for read/write 188 * sem_undo.id_next, 189 * sem_array.complex_count, 190 * sem_array.pending{_alter,_const}, 191 * sem_array.sem_undo 192 * 193 * b) global or semaphore sem_lock() for read/write: 194 * sem_array.sems[i].pending_{const,alter}: 195 * 196 * c) special: 197 * sem_undo_list.list_proc: 198 * * undo_list->lock for write 199 * * rcu for read 200 * use_global_lock: 201 * * global sem_lock() for write 202 * * either local or global sem_lock() for read. 203 * 204 * Memory ordering: 205 * Most ordering is enforced by using spin_lock() and spin_unlock(). 206 * The special case is use_global_lock: 207 * Setting it from non-zero to 0 is a RELEASE, this is ensured by 208 * using smp_store_release(). 209 * Testing if it is non-zero is an ACQUIRE, this is ensured by using 210 * smp_load_acquire(). 211 * Setting it from 0 to non-zero must be ordered with regards to 212 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire() 213 * is inside a spin_lock() and after a write from 0 to non-zero a 214 * spin_lock()+spin_unlock() is done. 215 */ 216 217 #define sc_semmsl sem_ctls[0] 218 #define sc_semmns sem_ctls[1] 219 #define sc_semopm sem_ctls[2] 220 #define sc_semmni sem_ctls[3] 221 222 int sem_init_ns(struct ipc_namespace *ns) 223 { 224 ns->sc_semmsl = SEMMSL; 225 ns->sc_semmns = SEMMNS; 226 ns->sc_semopm = SEMOPM; 227 ns->sc_semmni = SEMMNI; 228 ns->used_sems = 0; 229 return ipc_init_ids(&ns->ids[IPC_SEM_IDS]); 230 } 231 232 #ifdef CONFIG_IPC_NS 233 void sem_exit_ns(struct ipc_namespace *ns) 234 { 235 free_ipcs(ns, &sem_ids(ns), freeary); 236 idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); 237 rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht); 238 } 239 #endif 240 241 int __init sem_init(void) 242 { 243 const int err = sem_init_ns(&init_ipc_ns); 244 245 ipc_init_proc_interface("sysvipc/sem", 246 " key semid perms nsems uid gid cuid cgid otime ctime\n", 247 IPC_SEM_IDS, sysvipc_sem_proc_show); 248 return err; 249 } 250 251 /** 252 * unmerge_queues - unmerge queues, if possible. 253 * @sma: semaphore array 254 * 255 * The function unmerges the wait queues if complex_count is 0. 256 * It must be called prior to dropping the global semaphore array lock. 257 */ 258 static void unmerge_queues(struct sem_array *sma) 259 { 260 struct sem_queue *q, *tq; 261 262 /* complex operations still around? */ 263 if (sma->complex_count) 264 return; 265 /* 266 * We will switch back to simple mode. 267 * Move all pending operation back into the per-semaphore 268 * queues. 269 */ 270 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { 271 struct sem *curr; 272 curr = &sma->sems[q->sops[0].sem_num]; 273 274 list_add_tail(&q->list, &curr->pending_alter); 275 } 276 INIT_LIST_HEAD(&sma->pending_alter); 277 } 278 279 /** 280 * merge_queues - merge single semop queues into global queue 281 * @sma: semaphore array 282 * 283 * This function merges all per-semaphore queues into the global queue. 284 * It is necessary to achieve FIFO ordering for the pending single-sop 285 * operations when a multi-semop operation must sleep. 286 * Only the alter operations must be moved, the const operations can stay. 287 */ 288 static void merge_queues(struct sem_array *sma) 289 { 290 int i; 291 for (i = 0; i < sma->sem_nsems; i++) { 292 struct sem *sem = &sma->sems[i]; 293 294 list_splice_init(&sem->pending_alter, &sma->pending_alter); 295 } 296 } 297 298 static void sem_rcu_free(struct rcu_head *head) 299 { 300 struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); 301 struct sem_array *sma = container_of(p, struct sem_array, sem_perm); 302 303 security_sem_free(&sma->sem_perm); 304 kvfree(sma); 305 } 306 307 /* 308 * Enter the mode suitable for non-simple operations: 309 * Caller must own sem_perm.lock. 310 */ 311 static void complexmode_enter(struct sem_array *sma) 312 { 313 int i; 314 struct sem *sem; 315 316 if (sma->use_global_lock > 0) { 317 /* 318 * We are already in global lock mode. 319 * Nothing to do, just reset the 320 * counter until we return to simple mode. 321 */ 322 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; 323 return; 324 } 325 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; 326 327 for (i = 0; i < sma->sem_nsems; i++) { 328 sem = &sma->sems[i]; 329 spin_lock(&sem->lock); 330 spin_unlock(&sem->lock); 331 } 332 } 333 334 /* 335 * Try to leave the mode that disallows simple operations: 336 * Caller must own sem_perm.lock. 337 */ 338 static void complexmode_tryleave(struct sem_array *sma) 339 { 340 if (sma->complex_count) { 341 /* Complex ops are sleeping. 342 * We must stay in complex mode 343 */ 344 return; 345 } 346 if (sma->use_global_lock == 1) { 347 /* 348 * Immediately after setting use_global_lock to 0, 349 * a simple op can start. Thus: all memory writes 350 * performed by the current operation must be visible 351 * before we set use_global_lock to 0. 352 */ 353 smp_store_release(&sma->use_global_lock, 0); 354 } else { 355 sma->use_global_lock--; 356 } 357 } 358 359 #define SEM_GLOBAL_LOCK (-1) 360 /* 361 * If the request contains only one semaphore operation, and there are 362 * no complex transactions pending, lock only the semaphore involved. 363 * Otherwise, lock the entire semaphore array, since we either have 364 * multiple semaphores in our own semops, or we need to look at 365 * semaphores from other pending complex operations. 366 */ 367 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, 368 int nsops) 369 { 370 struct sem *sem; 371 372 if (nsops != 1) { 373 /* Complex operation - acquire a full lock */ 374 ipc_lock_object(&sma->sem_perm); 375 376 /* Prevent parallel simple ops */ 377 complexmode_enter(sma); 378 return SEM_GLOBAL_LOCK; 379 } 380 381 /* 382 * Only one semaphore affected - try to optimize locking. 383 * Optimized locking is possible if no complex operation 384 * is either enqueued or processed right now. 385 * 386 * Both facts are tracked by use_global_mode. 387 */ 388 sem = &sma->sems[sops->sem_num]; 389 390 /* 391 * Initial check for use_global_lock. Just an optimization, 392 * no locking, no memory barrier. 393 */ 394 if (!sma->use_global_lock) { 395 /* 396 * It appears that no complex operation is around. 397 * Acquire the per-semaphore lock. 398 */ 399 spin_lock(&sem->lock); 400 401 /* pairs with smp_store_release() */ 402 if (!smp_load_acquire(&sma->use_global_lock)) { 403 /* fast path successful! */ 404 return sops->sem_num; 405 } 406 spin_unlock(&sem->lock); 407 } 408 409 /* slow path: acquire the full lock */ 410 ipc_lock_object(&sma->sem_perm); 411 412 if (sma->use_global_lock == 0) { 413 /* 414 * The use_global_lock mode ended while we waited for 415 * sma->sem_perm.lock. Thus we must switch to locking 416 * with sem->lock. 417 * Unlike in the fast path, there is no need to recheck 418 * sma->use_global_lock after we have acquired sem->lock: 419 * We own sma->sem_perm.lock, thus use_global_lock cannot 420 * change. 421 */ 422 spin_lock(&sem->lock); 423 424 ipc_unlock_object(&sma->sem_perm); 425 return sops->sem_num; 426 } else { 427 /* 428 * Not a false alarm, thus continue to use the global lock 429 * mode. No need for complexmode_enter(), this was done by 430 * the caller that has set use_global_mode to non-zero. 431 */ 432 return SEM_GLOBAL_LOCK; 433 } 434 } 435 436 static inline void sem_unlock(struct sem_array *sma, int locknum) 437 { 438 if (locknum == SEM_GLOBAL_LOCK) { 439 unmerge_queues(sma); 440 complexmode_tryleave(sma); 441 ipc_unlock_object(&sma->sem_perm); 442 } else { 443 struct sem *sem = &sma->sems[locknum]; 444 spin_unlock(&sem->lock); 445 } 446 } 447 448 /* 449 * sem_lock_(check_) routines are called in the paths where the rwsem 450 * is not held. 451 * 452 * The caller holds the RCU read lock. 453 */ 454 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) 455 { 456 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); 457 458 if (IS_ERR(ipcp)) 459 return ERR_CAST(ipcp); 460 461 return container_of(ipcp, struct sem_array, sem_perm); 462 } 463 464 static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns, 465 int id) 466 { 467 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id); 468 469 if (IS_ERR(ipcp)) 470 return ERR_CAST(ipcp); 471 472 return container_of(ipcp, struct sem_array, sem_perm); 473 } 474 475 static inline void sem_lock_and_putref(struct sem_array *sma) 476 { 477 sem_lock(sma, NULL, -1); 478 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 479 } 480 481 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) 482 { 483 ipc_rmid(&sem_ids(ns), &s->sem_perm); 484 } 485 486 static struct sem_array *sem_alloc(size_t nsems) 487 { 488 struct sem_array *sma; 489 size_t size; 490 491 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) 492 return NULL; 493 494 size = sizeof(*sma) + nsems * sizeof(sma->sems[0]); 495 sma = kvmalloc(size, GFP_KERNEL); 496 if (unlikely(!sma)) 497 return NULL; 498 499 memset(sma, 0, size); 500 501 return sma; 502 } 503 504 /** 505 * newary - Create a new semaphore set 506 * @ns: namespace 507 * @params: ptr to the structure that contains key, semflg and nsems 508 * 509 * Called with sem_ids.rwsem held (as a writer) 510 */ 511 static int newary(struct ipc_namespace *ns, struct ipc_params *params) 512 { 513 int retval; 514 struct sem_array *sma; 515 key_t key = params->key; 516 int nsems = params->u.nsems; 517 int semflg = params->flg; 518 int i; 519 520 if (!nsems) 521 return -EINVAL; 522 if (ns->used_sems + nsems > ns->sc_semmns) 523 return -ENOSPC; 524 525 sma = sem_alloc(nsems); 526 if (!sma) 527 return -ENOMEM; 528 529 sma->sem_perm.mode = (semflg & S_IRWXUGO); 530 sma->sem_perm.key = key; 531 532 sma->sem_perm.security = NULL; 533 retval = security_sem_alloc(&sma->sem_perm); 534 if (retval) { 535 kvfree(sma); 536 return retval; 537 } 538 539 for (i = 0; i < nsems; i++) { 540 INIT_LIST_HEAD(&sma->sems[i].pending_alter); 541 INIT_LIST_HEAD(&sma->sems[i].pending_const); 542 spin_lock_init(&sma->sems[i].lock); 543 } 544 545 sma->complex_count = 0; 546 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; 547 INIT_LIST_HEAD(&sma->pending_alter); 548 INIT_LIST_HEAD(&sma->pending_const); 549 INIT_LIST_HEAD(&sma->list_id); 550 sma->sem_nsems = nsems; 551 sma->sem_ctime = ktime_get_real_seconds(); 552 553 /* ipc_addid() locks sma upon success. */ 554 retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); 555 if (retval < 0) { 556 call_rcu(&sma->sem_perm.rcu, sem_rcu_free); 557 return retval; 558 } 559 ns->used_sems += nsems; 560 561 sem_unlock(sma, -1); 562 rcu_read_unlock(); 563 564 return sma->sem_perm.id; 565 } 566 567 568 /* 569 * Called with sem_ids.rwsem and ipcp locked. 570 */ 571 static inline int sem_more_checks(struct kern_ipc_perm *ipcp, 572 struct ipc_params *params) 573 { 574 struct sem_array *sma; 575 576 sma = container_of(ipcp, struct sem_array, sem_perm); 577 if (params->u.nsems > sma->sem_nsems) 578 return -EINVAL; 579 580 return 0; 581 } 582 583 long ksys_semget(key_t key, int nsems, int semflg) 584 { 585 struct ipc_namespace *ns; 586 static const struct ipc_ops sem_ops = { 587 .getnew = newary, 588 .associate = security_sem_associate, 589 .more_checks = sem_more_checks, 590 }; 591 struct ipc_params sem_params; 592 593 ns = current->nsproxy->ipc_ns; 594 595 if (nsems < 0 || nsems > ns->sc_semmsl) 596 return -EINVAL; 597 598 sem_params.key = key; 599 sem_params.flg = semflg; 600 sem_params.u.nsems = nsems; 601 602 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); 603 } 604 605 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) 606 { 607 return ksys_semget(key, nsems, semflg); 608 } 609 610 /** 611 * perform_atomic_semop[_slow] - Attempt to perform semaphore 612 * operations on a given array. 613 * @sma: semaphore array 614 * @q: struct sem_queue that describes the operation 615 * 616 * Caller blocking are as follows, based the value 617 * indicated by the semaphore operation (sem_op): 618 * 619 * (1) >0 never blocks. 620 * (2) 0 (wait-for-zero operation): semval is non-zero. 621 * (3) <0 attempting to decrement semval to a value smaller than zero. 622 * 623 * Returns 0 if the operation was possible. 624 * Returns 1 if the operation is impossible, the caller must sleep. 625 * Returns <0 for error codes. 626 */ 627 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) 628 { 629 int result, sem_op, nsops; 630 struct pid *pid; 631 struct sembuf *sop; 632 struct sem *curr; 633 struct sembuf *sops; 634 struct sem_undo *un; 635 636 sops = q->sops; 637 nsops = q->nsops; 638 un = q->undo; 639 640 for (sop = sops; sop < sops + nsops; sop++) { 641 curr = &sma->sems[sop->sem_num]; 642 sem_op = sop->sem_op; 643 result = curr->semval; 644 645 if (!sem_op && result) 646 goto would_block; 647 648 result += sem_op; 649 if (result < 0) 650 goto would_block; 651 if (result > SEMVMX) 652 goto out_of_range; 653 654 if (sop->sem_flg & SEM_UNDO) { 655 int undo = un->semadj[sop->sem_num] - sem_op; 656 /* Exceeding the undo range is an error. */ 657 if (undo < (-SEMAEM - 1) || undo > SEMAEM) 658 goto out_of_range; 659 un->semadj[sop->sem_num] = undo; 660 } 661 662 curr->semval = result; 663 } 664 665 sop--; 666 pid = q->pid; 667 while (sop >= sops) { 668 ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid); 669 sop--; 670 } 671 672 return 0; 673 674 out_of_range: 675 result = -ERANGE; 676 goto undo; 677 678 would_block: 679 q->blocking = sop; 680 681 if (sop->sem_flg & IPC_NOWAIT) 682 result = -EAGAIN; 683 else 684 result = 1; 685 686 undo: 687 sop--; 688 while (sop >= sops) { 689 sem_op = sop->sem_op; 690 sma->sems[sop->sem_num].semval -= sem_op; 691 if (sop->sem_flg & SEM_UNDO) 692 un->semadj[sop->sem_num] += sem_op; 693 sop--; 694 } 695 696 return result; 697 } 698 699 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) 700 { 701 int result, sem_op, nsops; 702 struct sembuf *sop; 703 struct sem *curr; 704 struct sembuf *sops; 705 struct sem_undo *un; 706 707 sops = q->sops; 708 nsops = q->nsops; 709 un = q->undo; 710 711 if (unlikely(q->dupsop)) 712 return perform_atomic_semop_slow(sma, q); 713 714 /* 715 * We scan the semaphore set twice, first to ensure that the entire 716 * operation can succeed, therefore avoiding any pointless writes 717 * to shared memory and having to undo such changes in order to block 718 * until the operations can go through. 719 */ 720 for (sop = sops; sop < sops + nsops; sop++) { 721 curr = &sma->sems[sop->sem_num]; 722 sem_op = sop->sem_op; 723 result = curr->semval; 724 725 if (!sem_op && result) 726 goto would_block; /* wait-for-zero */ 727 728 result += sem_op; 729 if (result < 0) 730 goto would_block; 731 732 if (result > SEMVMX) 733 return -ERANGE; 734 735 if (sop->sem_flg & SEM_UNDO) { 736 int undo = un->semadj[sop->sem_num] - sem_op; 737 738 /* Exceeding the undo range is an error. */ 739 if (undo < (-SEMAEM - 1) || undo > SEMAEM) 740 return -ERANGE; 741 } 742 } 743 744 for (sop = sops; sop < sops + nsops; sop++) { 745 curr = &sma->sems[sop->sem_num]; 746 sem_op = sop->sem_op; 747 result = curr->semval; 748 749 if (sop->sem_flg & SEM_UNDO) { 750 int undo = un->semadj[sop->sem_num] - sem_op; 751 752 un->semadj[sop->sem_num] = undo; 753 } 754 curr->semval += sem_op; 755 ipc_update_pid(&curr->sempid, q->pid); 756 } 757 758 return 0; 759 760 would_block: 761 q->blocking = sop; 762 return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1; 763 } 764 765 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error, 766 struct wake_q_head *wake_q) 767 { 768 wake_q_add(wake_q, q->sleeper); 769 /* 770 * Rely on the above implicit barrier, such that we can 771 * ensure that we hold reference to the task before setting 772 * q->status. Otherwise we could race with do_exit if the 773 * task is awoken by an external event before calling 774 * wake_up_process(). 775 */ 776 WRITE_ONCE(q->status, error); 777 } 778 779 static void unlink_queue(struct sem_array *sma, struct sem_queue *q) 780 { 781 list_del(&q->list); 782 if (q->nsops > 1) 783 sma->complex_count--; 784 } 785 786 /** check_restart(sma, q) 787 * @sma: semaphore array 788 * @q: the operation that just completed 789 * 790 * update_queue is O(N^2) when it restarts scanning the whole queue of 791 * waiting operations. Therefore this function checks if the restart is 792 * really necessary. It is called after a previously waiting operation 793 * modified the array. 794 * Note that wait-for-zero operations are handled without restart. 795 */ 796 static inline int check_restart(struct sem_array *sma, struct sem_queue *q) 797 { 798 /* pending complex alter operations are too difficult to analyse */ 799 if (!list_empty(&sma->pending_alter)) 800 return 1; 801 802 /* we were a sleeping complex operation. Too difficult */ 803 if (q->nsops > 1) 804 return 1; 805 806 /* It is impossible that someone waits for the new value: 807 * - complex operations always restart. 808 * - wait-for-zero are handled seperately. 809 * - q is a previously sleeping simple operation that 810 * altered the array. It must be a decrement, because 811 * simple increments never sleep. 812 * - If there are older (higher priority) decrements 813 * in the queue, then they have observed the original 814 * semval value and couldn't proceed. The operation 815 * decremented to value - thus they won't proceed either. 816 */ 817 return 0; 818 } 819 820 /** 821 * wake_const_ops - wake up non-alter tasks 822 * @sma: semaphore array. 823 * @semnum: semaphore that was modified. 824 * @wake_q: lockless wake-queue head. 825 * 826 * wake_const_ops must be called after a semaphore in a semaphore array 827 * was set to 0. If complex const operations are pending, wake_const_ops must 828 * be called with semnum = -1, as well as with the number of each modified 829 * semaphore. 830 * The tasks that must be woken up are added to @wake_q. The return code 831 * is stored in q->pid. 832 * The function returns 1 if at least one operation was completed successfully. 833 */ 834 static int wake_const_ops(struct sem_array *sma, int semnum, 835 struct wake_q_head *wake_q) 836 { 837 struct sem_queue *q, *tmp; 838 struct list_head *pending_list; 839 int semop_completed = 0; 840 841 if (semnum == -1) 842 pending_list = &sma->pending_const; 843 else 844 pending_list = &sma->sems[semnum].pending_const; 845 846 list_for_each_entry_safe(q, tmp, pending_list, list) { 847 int error = perform_atomic_semop(sma, q); 848 849 if (error > 0) 850 continue; 851 /* operation completed, remove from queue & wakeup */ 852 unlink_queue(sma, q); 853 854 wake_up_sem_queue_prepare(q, error, wake_q); 855 if (error == 0) 856 semop_completed = 1; 857 } 858 859 return semop_completed; 860 } 861 862 /** 863 * do_smart_wakeup_zero - wakeup all wait for zero tasks 864 * @sma: semaphore array 865 * @sops: operations that were performed 866 * @nsops: number of operations 867 * @wake_q: lockless wake-queue head 868 * 869 * Checks all required queue for wait-for-zero operations, based 870 * on the actual changes that were performed on the semaphore array. 871 * The function returns 1 if at least one operation was completed successfully. 872 */ 873 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, 874 int nsops, struct wake_q_head *wake_q) 875 { 876 int i; 877 int semop_completed = 0; 878 int got_zero = 0; 879 880 /* first: the per-semaphore queues, if known */ 881 if (sops) { 882 for (i = 0; i < nsops; i++) { 883 int num = sops[i].sem_num; 884 885 if (sma->sems[num].semval == 0) { 886 got_zero = 1; 887 semop_completed |= wake_const_ops(sma, num, wake_q); 888 } 889 } 890 } else { 891 /* 892 * No sops means modified semaphores not known. 893 * Assume all were changed. 894 */ 895 for (i = 0; i < sma->sem_nsems; i++) { 896 if (sma->sems[i].semval == 0) { 897 got_zero = 1; 898 semop_completed |= wake_const_ops(sma, i, wake_q); 899 } 900 } 901 } 902 /* 903 * If one of the modified semaphores got 0, 904 * then check the global queue, too. 905 */ 906 if (got_zero) 907 semop_completed |= wake_const_ops(sma, -1, wake_q); 908 909 return semop_completed; 910 } 911 912 913 /** 914 * update_queue - look for tasks that can be completed. 915 * @sma: semaphore array. 916 * @semnum: semaphore that was modified. 917 * @wake_q: lockless wake-queue head. 918 * 919 * update_queue must be called after a semaphore in a semaphore array 920 * was modified. If multiple semaphores were modified, update_queue must 921 * be called with semnum = -1, as well as with the number of each modified 922 * semaphore. 923 * The tasks that must be woken up are added to @wake_q. The return code 924 * is stored in q->pid. 925 * The function internally checks if const operations can now succeed. 926 * 927 * The function return 1 if at least one semop was completed successfully. 928 */ 929 static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q) 930 { 931 struct sem_queue *q, *tmp; 932 struct list_head *pending_list; 933 int semop_completed = 0; 934 935 if (semnum == -1) 936 pending_list = &sma->pending_alter; 937 else 938 pending_list = &sma->sems[semnum].pending_alter; 939 940 again: 941 list_for_each_entry_safe(q, tmp, pending_list, list) { 942 int error, restart; 943 944 /* If we are scanning the single sop, per-semaphore list of 945 * one semaphore and that semaphore is 0, then it is not 946 * necessary to scan further: simple increments 947 * that affect only one entry succeed immediately and cannot 948 * be in the per semaphore pending queue, and decrements 949 * cannot be successful if the value is already 0. 950 */ 951 if (semnum != -1 && sma->sems[semnum].semval == 0) 952 break; 953 954 error = perform_atomic_semop(sma, q); 955 956 /* Does q->sleeper still need to sleep? */ 957 if (error > 0) 958 continue; 959 960 unlink_queue(sma, q); 961 962 if (error) { 963 restart = 0; 964 } else { 965 semop_completed = 1; 966 do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q); 967 restart = check_restart(sma, q); 968 } 969 970 wake_up_sem_queue_prepare(q, error, wake_q); 971 if (restart) 972 goto again; 973 } 974 return semop_completed; 975 } 976 977 /** 978 * set_semotime - set sem_otime 979 * @sma: semaphore array 980 * @sops: operations that modified the array, may be NULL 981 * 982 * sem_otime is replicated to avoid cache line trashing. 983 * This function sets one instance to the current time. 984 */ 985 static void set_semotime(struct sem_array *sma, struct sembuf *sops) 986 { 987 if (sops == NULL) { 988 sma->sems[0].sem_otime = ktime_get_real_seconds(); 989 } else { 990 sma->sems[sops[0].sem_num].sem_otime = 991 ktime_get_real_seconds(); 992 } 993 } 994 995 /** 996 * do_smart_update - optimized update_queue 997 * @sma: semaphore array 998 * @sops: operations that were performed 999 * @nsops: number of operations 1000 * @otime: force setting otime 1001 * @wake_q: lockless wake-queue head 1002 * 1003 * do_smart_update() does the required calls to update_queue and wakeup_zero, 1004 * based on the actual changes that were performed on the semaphore array. 1005 * Note that the function does not do the actual wake-up: the caller is 1006 * responsible for calling wake_up_q(). 1007 * It is safe to perform this call after dropping all locks. 1008 */ 1009 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, 1010 int otime, struct wake_q_head *wake_q) 1011 { 1012 int i; 1013 1014 otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q); 1015 1016 if (!list_empty(&sma->pending_alter)) { 1017 /* semaphore array uses the global queue - just process it. */ 1018 otime |= update_queue(sma, -1, wake_q); 1019 } else { 1020 if (!sops) { 1021 /* 1022 * No sops, thus the modified semaphores are not 1023 * known. Check all. 1024 */ 1025 for (i = 0; i < sma->sem_nsems; i++) 1026 otime |= update_queue(sma, i, wake_q); 1027 } else { 1028 /* 1029 * Check the semaphores that were increased: 1030 * - No complex ops, thus all sleeping ops are 1031 * decrease. 1032 * - if we decreased the value, then any sleeping 1033 * semaphore ops wont be able to run: If the 1034 * previous value was too small, then the new 1035 * value will be too small, too. 1036 */ 1037 for (i = 0; i < nsops; i++) { 1038 if (sops[i].sem_op > 0) { 1039 otime |= update_queue(sma, 1040 sops[i].sem_num, wake_q); 1041 } 1042 } 1043 } 1044 } 1045 if (otime) 1046 set_semotime(sma, sops); 1047 } 1048 1049 /* 1050 * check_qop: Test if a queued operation sleeps on the semaphore semnum 1051 */ 1052 static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, 1053 bool count_zero) 1054 { 1055 struct sembuf *sop = q->blocking; 1056 1057 /* 1058 * Linux always (since 0.99.10) reported a task as sleeping on all 1059 * semaphores. This violates SUS, therefore it was changed to the 1060 * standard compliant behavior. 1061 * Give the administrators a chance to notice that an application 1062 * might misbehave because it relies on the Linux behavior. 1063 */ 1064 pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n" 1065 "The task %s (%d) triggered the difference, watch for misbehavior.\n", 1066 current->comm, task_pid_nr(current)); 1067 1068 if (sop->sem_num != semnum) 1069 return 0; 1070 1071 if (count_zero && sop->sem_op == 0) 1072 return 1; 1073 if (!count_zero && sop->sem_op < 0) 1074 return 1; 1075 1076 return 0; 1077 } 1078 1079 /* The following counts are associated to each semaphore: 1080 * semncnt number of tasks waiting on semval being nonzero 1081 * semzcnt number of tasks waiting on semval being zero 1082 * 1083 * Per definition, a task waits only on the semaphore of the first semop 1084 * that cannot proceed, even if additional operation would block, too. 1085 */ 1086 static int count_semcnt(struct sem_array *sma, ushort semnum, 1087 bool count_zero) 1088 { 1089 struct list_head *l; 1090 struct sem_queue *q; 1091 int semcnt; 1092 1093 semcnt = 0; 1094 /* First: check the simple operations. They are easy to evaluate */ 1095 if (count_zero) 1096 l = &sma->sems[semnum].pending_const; 1097 else 1098 l = &sma->sems[semnum].pending_alter; 1099 1100 list_for_each_entry(q, l, list) { 1101 /* all task on a per-semaphore list sleep on exactly 1102 * that semaphore 1103 */ 1104 semcnt++; 1105 } 1106 1107 /* Then: check the complex operations. */ 1108 list_for_each_entry(q, &sma->pending_alter, list) { 1109 semcnt += check_qop(sma, semnum, q, count_zero); 1110 } 1111 if (count_zero) { 1112 list_for_each_entry(q, &sma->pending_const, list) { 1113 semcnt += check_qop(sma, semnum, q, count_zero); 1114 } 1115 } 1116 return semcnt; 1117 } 1118 1119 /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked 1120 * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem 1121 * remains locked on exit. 1122 */ 1123 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) 1124 { 1125 struct sem_undo *un, *tu; 1126 struct sem_queue *q, *tq; 1127 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); 1128 int i; 1129 DEFINE_WAKE_Q(wake_q); 1130 1131 /* Free the existing undo structures for this semaphore set. */ 1132 ipc_assert_locked_object(&sma->sem_perm); 1133 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { 1134 list_del(&un->list_id); 1135 spin_lock(&un->ulp->lock); 1136 un->semid = -1; 1137 list_del_rcu(&un->list_proc); 1138 spin_unlock(&un->ulp->lock); 1139 kfree_rcu(un, rcu); 1140 } 1141 1142 /* Wake up all pending processes and let them fail with EIDRM. */ 1143 list_for_each_entry_safe(q, tq, &sma->pending_const, list) { 1144 unlink_queue(sma, q); 1145 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); 1146 } 1147 1148 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { 1149 unlink_queue(sma, q); 1150 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); 1151 } 1152 for (i = 0; i < sma->sem_nsems; i++) { 1153 struct sem *sem = &sma->sems[i]; 1154 list_for_each_entry_safe(q, tq, &sem->pending_const, list) { 1155 unlink_queue(sma, q); 1156 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); 1157 } 1158 list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { 1159 unlink_queue(sma, q); 1160 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); 1161 } 1162 ipc_update_pid(&sem->sempid, NULL); 1163 } 1164 1165 /* Remove the semaphore set from the IDR */ 1166 sem_rmid(ns, sma); 1167 sem_unlock(sma, -1); 1168 rcu_read_unlock(); 1169 1170 wake_up_q(&wake_q); 1171 ns->used_sems -= sma->sem_nsems; 1172 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 1173 } 1174 1175 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) 1176 { 1177 switch (version) { 1178 case IPC_64: 1179 return copy_to_user(buf, in, sizeof(*in)); 1180 case IPC_OLD: 1181 { 1182 struct semid_ds out; 1183 1184 memset(&out, 0, sizeof(out)); 1185 1186 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); 1187 1188 out.sem_otime = in->sem_otime; 1189 out.sem_ctime = in->sem_ctime; 1190 out.sem_nsems = in->sem_nsems; 1191 1192 return copy_to_user(buf, &out, sizeof(out)); 1193 } 1194 default: 1195 return -EINVAL; 1196 } 1197 } 1198 1199 static time64_t get_semotime(struct sem_array *sma) 1200 { 1201 int i; 1202 time64_t res; 1203 1204 res = sma->sems[0].sem_otime; 1205 for (i = 1; i < sma->sem_nsems; i++) { 1206 time64_t to = sma->sems[i].sem_otime; 1207 1208 if (to > res) 1209 res = to; 1210 } 1211 return res; 1212 } 1213 1214 static int semctl_stat(struct ipc_namespace *ns, int semid, 1215 int cmd, struct semid64_ds *semid64) 1216 { 1217 struct sem_array *sma; 1218 time64_t semotime; 1219 int id = 0; 1220 int err; 1221 1222 memset(semid64, 0, sizeof(*semid64)); 1223 1224 rcu_read_lock(); 1225 if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) { 1226 sma = sem_obtain_object(ns, semid); 1227 if (IS_ERR(sma)) { 1228 err = PTR_ERR(sma); 1229 goto out_unlock; 1230 } 1231 id = sma->sem_perm.id; 1232 } else { /* IPC_STAT */ 1233 sma = sem_obtain_object_check(ns, semid); 1234 if (IS_ERR(sma)) { 1235 err = PTR_ERR(sma); 1236 goto out_unlock; 1237 } 1238 } 1239 1240 /* see comment for SHM_STAT_ANY */ 1241 if (cmd == SEM_STAT_ANY) 1242 audit_ipc_obj(&sma->sem_perm); 1243 else { 1244 err = -EACCES; 1245 if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) 1246 goto out_unlock; 1247 } 1248 1249 err = security_sem_semctl(&sma->sem_perm, cmd); 1250 if (err) 1251 goto out_unlock; 1252 1253 ipc_lock_object(&sma->sem_perm); 1254 1255 if (!ipc_valid_object(&sma->sem_perm)) { 1256 ipc_unlock_object(&sma->sem_perm); 1257 err = -EIDRM; 1258 goto out_unlock; 1259 } 1260 1261 kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm); 1262 semotime = get_semotime(sma); 1263 semid64->sem_otime = semotime; 1264 semid64->sem_ctime = sma->sem_ctime; 1265 #ifndef CONFIG_64BIT 1266 semid64->sem_otime_high = semotime >> 32; 1267 semid64->sem_ctime_high = sma->sem_ctime >> 32; 1268 #endif 1269 semid64->sem_nsems = sma->sem_nsems; 1270 1271 ipc_unlock_object(&sma->sem_perm); 1272 rcu_read_unlock(); 1273 return id; 1274 1275 out_unlock: 1276 rcu_read_unlock(); 1277 return err; 1278 } 1279 1280 static int semctl_info(struct ipc_namespace *ns, int semid, 1281 int cmd, void __user *p) 1282 { 1283 struct seminfo seminfo; 1284 int max_id; 1285 int err; 1286 1287 err = security_sem_semctl(NULL, cmd); 1288 if (err) 1289 return err; 1290 1291 memset(&seminfo, 0, sizeof(seminfo)); 1292 seminfo.semmni = ns->sc_semmni; 1293 seminfo.semmns = ns->sc_semmns; 1294 seminfo.semmsl = ns->sc_semmsl; 1295 seminfo.semopm = ns->sc_semopm; 1296 seminfo.semvmx = SEMVMX; 1297 seminfo.semmnu = SEMMNU; 1298 seminfo.semmap = SEMMAP; 1299 seminfo.semume = SEMUME; 1300 down_read(&sem_ids(ns).rwsem); 1301 if (cmd == SEM_INFO) { 1302 seminfo.semusz = sem_ids(ns).in_use; 1303 seminfo.semaem = ns->used_sems; 1304 } else { 1305 seminfo.semusz = SEMUSZ; 1306 seminfo.semaem = SEMAEM; 1307 } 1308 max_id = ipc_get_maxid(&sem_ids(ns)); 1309 up_read(&sem_ids(ns).rwsem); 1310 if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) 1311 return -EFAULT; 1312 return (max_id < 0) ? 0 : max_id; 1313 } 1314 1315 static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, 1316 int val) 1317 { 1318 struct sem_undo *un; 1319 struct sem_array *sma; 1320 struct sem *curr; 1321 int err; 1322 DEFINE_WAKE_Q(wake_q); 1323 1324 if (val > SEMVMX || val < 0) 1325 return -ERANGE; 1326 1327 rcu_read_lock(); 1328 sma = sem_obtain_object_check(ns, semid); 1329 if (IS_ERR(sma)) { 1330 rcu_read_unlock(); 1331 return PTR_ERR(sma); 1332 } 1333 1334 if (semnum < 0 || semnum >= sma->sem_nsems) { 1335 rcu_read_unlock(); 1336 return -EINVAL; 1337 } 1338 1339 1340 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) { 1341 rcu_read_unlock(); 1342 return -EACCES; 1343 } 1344 1345 err = security_sem_semctl(&sma->sem_perm, SETVAL); 1346 if (err) { 1347 rcu_read_unlock(); 1348 return -EACCES; 1349 } 1350 1351 sem_lock(sma, NULL, -1); 1352 1353 if (!ipc_valid_object(&sma->sem_perm)) { 1354 sem_unlock(sma, -1); 1355 rcu_read_unlock(); 1356 return -EIDRM; 1357 } 1358 1359 curr = &sma->sems[semnum]; 1360 1361 ipc_assert_locked_object(&sma->sem_perm); 1362 list_for_each_entry(un, &sma->list_id, list_id) 1363 un->semadj[semnum] = 0; 1364 1365 curr->semval = val; 1366 ipc_update_pid(&curr->sempid, task_tgid(current)); 1367 sma->sem_ctime = ktime_get_real_seconds(); 1368 /* maybe some queued-up processes were waiting for this */ 1369 do_smart_update(sma, NULL, 0, 0, &wake_q); 1370 sem_unlock(sma, -1); 1371 rcu_read_unlock(); 1372 wake_up_q(&wake_q); 1373 return 0; 1374 } 1375 1376 static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, 1377 int cmd, void __user *p) 1378 { 1379 struct sem_array *sma; 1380 struct sem *curr; 1381 int err, nsems; 1382 ushort fast_sem_io[SEMMSL_FAST]; 1383 ushort *sem_io = fast_sem_io; 1384 DEFINE_WAKE_Q(wake_q); 1385 1386 rcu_read_lock(); 1387 sma = sem_obtain_object_check(ns, semid); 1388 if (IS_ERR(sma)) { 1389 rcu_read_unlock(); 1390 return PTR_ERR(sma); 1391 } 1392 1393 nsems = sma->sem_nsems; 1394 1395 err = -EACCES; 1396 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO)) 1397 goto out_rcu_wakeup; 1398 1399 err = security_sem_semctl(&sma->sem_perm, cmd); 1400 if (err) 1401 goto out_rcu_wakeup; 1402 1403 err = -EACCES; 1404 switch (cmd) { 1405 case GETALL: 1406 { 1407 ushort __user *array = p; 1408 int i; 1409 1410 sem_lock(sma, NULL, -1); 1411 if (!ipc_valid_object(&sma->sem_perm)) { 1412 err = -EIDRM; 1413 goto out_unlock; 1414 } 1415 if (nsems > SEMMSL_FAST) { 1416 if (!ipc_rcu_getref(&sma->sem_perm)) { 1417 err = -EIDRM; 1418 goto out_unlock; 1419 } 1420 sem_unlock(sma, -1); 1421 rcu_read_unlock(); 1422 sem_io = kvmalloc_array(nsems, sizeof(ushort), 1423 GFP_KERNEL); 1424 if (sem_io == NULL) { 1425 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 1426 return -ENOMEM; 1427 } 1428 1429 rcu_read_lock(); 1430 sem_lock_and_putref(sma); 1431 if (!ipc_valid_object(&sma->sem_perm)) { 1432 err = -EIDRM; 1433 goto out_unlock; 1434 } 1435 } 1436 for (i = 0; i < sma->sem_nsems; i++) 1437 sem_io[i] = sma->sems[i].semval; 1438 sem_unlock(sma, -1); 1439 rcu_read_unlock(); 1440 err = 0; 1441 if (copy_to_user(array, sem_io, nsems*sizeof(ushort))) 1442 err = -EFAULT; 1443 goto out_free; 1444 } 1445 case SETALL: 1446 { 1447 int i; 1448 struct sem_undo *un; 1449 1450 if (!ipc_rcu_getref(&sma->sem_perm)) { 1451 err = -EIDRM; 1452 goto out_rcu_wakeup; 1453 } 1454 rcu_read_unlock(); 1455 1456 if (nsems > SEMMSL_FAST) { 1457 sem_io = kvmalloc_array(nsems, sizeof(ushort), 1458 GFP_KERNEL); 1459 if (sem_io == NULL) { 1460 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 1461 return -ENOMEM; 1462 } 1463 } 1464 1465 if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { 1466 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 1467 err = -EFAULT; 1468 goto out_free; 1469 } 1470 1471 for (i = 0; i < nsems; i++) { 1472 if (sem_io[i] > SEMVMX) { 1473 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 1474 err = -ERANGE; 1475 goto out_free; 1476 } 1477 } 1478 rcu_read_lock(); 1479 sem_lock_and_putref(sma); 1480 if (!ipc_valid_object(&sma->sem_perm)) { 1481 err = -EIDRM; 1482 goto out_unlock; 1483 } 1484 1485 for (i = 0; i < nsems; i++) { 1486 sma->sems[i].semval = sem_io[i]; 1487 ipc_update_pid(&sma->sems[i].sempid, task_tgid(current)); 1488 } 1489 1490 ipc_assert_locked_object(&sma->sem_perm); 1491 list_for_each_entry(un, &sma->list_id, list_id) { 1492 for (i = 0; i < nsems; i++) 1493 un->semadj[i] = 0; 1494 } 1495 sma->sem_ctime = ktime_get_real_seconds(); 1496 /* maybe some queued-up processes were waiting for this */ 1497 do_smart_update(sma, NULL, 0, 0, &wake_q); 1498 err = 0; 1499 goto out_unlock; 1500 } 1501 /* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */ 1502 } 1503 err = -EINVAL; 1504 if (semnum < 0 || semnum >= nsems) 1505 goto out_rcu_wakeup; 1506 1507 sem_lock(sma, NULL, -1); 1508 if (!ipc_valid_object(&sma->sem_perm)) { 1509 err = -EIDRM; 1510 goto out_unlock; 1511 } 1512 curr = &sma->sems[semnum]; 1513 1514 switch (cmd) { 1515 case GETVAL: 1516 err = curr->semval; 1517 goto out_unlock; 1518 case GETPID: 1519 err = pid_vnr(curr->sempid); 1520 goto out_unlock; 1521 case GETNCNT: 1522 err = count_semcnt(sma, semnum, 0); 1523 goto out_unlock; 1524 case GETZCNT: 1525 err = count_semcnt(sma, semnum, 1); 1526 goto out_unlock; 1527 } 1528 1529 out_unlock: 1530 sem_unlock(sma, -1); 1531 out_rcu_wakeup: 1532 rcu_read_unlock(); 1533 wake_up_q(&wake_q); 1534 out_free: 1535 if (sem_io != fast_sem_io) 1536 kvfree(sem_io); 1537 return err; 1538 } 1539 1540 static inline unsigned long 1541 copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) 1542 { 1543 switch (version) { 1544 case IPC_64: 1545 if (copy_from_user(out, buf, sizeof(*out))) 1546 return -EFAULT; 1547 return 0; 1548 case IPC_OLD: 1549 { 1550 struct semid_ds tbuf_old; 1551 1552 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) 1553 return -EFAULT; 1554 1555 out->sem_perm.uid = tbuf_old.sem_perm.uid; 1556 out->sem_perm.gid = tbuf_old.sem_perm.gid; 1557 out->sem_perm.mode = tbuf_old.sem_perm.mode; 1558 1559 return 0; 1560 } 1561 default: 1562 return -EINVAL; 1563 } 1564 } 1565 1566 /* 1567 * This function handles some semctl commands which require the rwsem 1568 * to be held in write mode. 1569 * NOTE: no locks must be held, the rwsem is taken inside this function. 1570 */ 1571 static int semctl_down(struct ipc_namespace *ns, int semid, 1572 int cmd, struct semid64_ds *semid64) 1573 { 1574 struct sem_array *sma; 1575 int err; 1576 struct kern_ipc_perm *ipcp; 1577 1578 down_write(&sem_ids(ns).rwsem); 1579 rcu_read_lock(); 1580 1581 ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd, 1582 &semid64->sem_perm, 0); 1583 if (IS_ERR(ipcp)) { 1584 err = PTR_ERR(ipcp); 1585 goto out_unlock1; 1586 } 1587 1588 sma = container_of(ipcp, struct sem_array, sem_perm); 1589 1590 err = security_sem_semctl(&sma->sem_perm, cmd); 1591 if (err) 1592 goto out_unlock1; 1593 1594 switch (cmd) { 1595 case IPC_RMID: 1596 sem_lock(sma, NULL, -1); 1597 /* freeary unlocks the ipc object and rcu */ 1598 freeary(ns, ipcp); 1599 goto out_up; 1600 case IPC_SET: 1601 sem_lock(sma, NULL, -1); 1602 err = ipc_update_perm(&semid64->sem_perm, ipcp); 1603 if (err) 1604 goto out_unlock0; 1605 sma->sem_ctime = ktime_get_real_seconds(); 1606 break; 1607 default: 1608 err = -EINVAL; 1609 goto out_unlock1; 1610 } 1611 1612 out_unlock0: 1613 sem_unlock(sma, -1); 1614 out_unlock1: 1615 rcu_read_unlock(); 1616 out_up: 1617 up_write(&sem_ids(ns).rwsem); 1618 return err; 1619 } 1620 1621 long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg) 1622 { 1623 int version; 1624 struct ipc_namespace *ns; 1625 void __user *p = (void __user *)arg; 1626 struct semid64_ds semid64; 1627 int err; 1628 1629 if (semid < 0) 1630 return -EINVAL; 1631 1632 version = ipc_parse_version(&cmd); 1633 ns = current->nsproxy->ipc_ns; 1634 1635 switch (cmd) { 1636 case IPC_INFO: 1637 case SEM_INFO: 1638 return semctl_info(ns, semid, cmd, p); 1639 case IPC_STAT: 1640 case SEM_STAT: 1641 case SEM_STAT_ANY: 1642 err = semctl_stat(ns, semid, cmd, &semid64); 1643 if (err < 0) 1644 return err; 1645 if (copy_semid_to_user(p, &semid64, version)) 1646 err = -EFAULT; 1647 return err; 1648 case GETALL: 1649 case GETVAL: 1650 case GETPID: 1651 case GETNCNT: 1652 case GETZCNT: 1653 case SETALL: 1654 return semctl_main(ns, semid, semnum, cmd, p); 1655 case SETVAL: { 1656 int val; 1657 #if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) 1658 /* big-endian 64bit */ 1659 val = arg >> 32; 1660 #else 1661 /* 32bit or little-endian 64bit */ 1662 val = arg; 1663 #endif 1664 return semctl_setval(ns, semid, semnum, val); 1665 } 1666 case IPC_SET: 1667 if (copy_semid_from_user(&semid64, p, version)) 1668 return -EFAULT; 1669 case IPC_RMID: 1670 return semctl_down(ns, semid, cmd, &semid64); 1671 default: 1672 return -EINVAL; 1673 } 1674 } 1675 1676 SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) 1677 { 1678 return ksys_semctl(semid, semnum, cmd, arg); 1679 } 1680 1681 #ifdef CONFIG_COMPAT 1682 1683 struct compat_semid_ds { 1684 struct compat_ipc_perm sem_perm; 1685 compat_time_t sem_otime; 1686 compat_time_t sem_ctime; 1687 compat_uptr_t sem_base; 1688 compat_uptr_t sem_pending; 1689 compat_uptr_t sem_pending_last; 1690 compat_uptr_t undo; 1691 unsigned short sem_nsems; 1692 }; 1693 1694 static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf, 1695 int version) 1696 { 1697 memset(out, 0, sizeof(*out)); 1698 if (version == IPC_64) { 1699 struct compat_semid64_ds __user *p = buf; 1700 return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm); 1701 } else { 1702 struct compat_semid_ds __user *p = buf; 1703 return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm); 1704 } 1705 } 1706 1707 static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in, 1708 int version) 1709 { 1710 if (version == IPC_64) { 1711 struct compat_semid64_ds v; 1712 memset(&v, 0, sizeof(v)); 1713 to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm); 1714 v.sem_otime = lower_32_bits(in->sem_otime); 1715 v.sem_otime_high = upper_32_bits(in->sem_otime); 1716 v.sem_ctime = lower_32_bits(in->sem_ctime); 1717 v.sem_ctime_high = upper_32_bits(in->sem_ctime); 1718 v.sem_nsems = in->sem_nsems; 1719 return copy_to_user(buf, &v, sizeof(v)); 1720 } else { 1721 struct compat_semid_ds v; 1722 memset(&v, 0, sizeof(v)); 1723 to_compat_ipc_perm(&v.sem_perm, &in->sem_perm); 1724 v.sem_otime = in->sem_otime; 1725 v.sem_ctime = in->sem_ctime; 1726 v.sem_nsems = in->sem_nsems; 1727 return copy_to_user(buf, &v, sizeof(v)); 1728 } 1729 } 1730 1731 long compat_ksys_semctl(int semid, int semnum, int cmd, int arg) 1732 { 1733 void __user *p = compat_ptr(arg); 1734 struct ipc_namespace *ns; 1735 struct semid64_ds semid64; 1736 int version = compat_ipc_parse_version(&cmd); 1737 int err; 1738 1739 ns = current->nsproxy->ipc_ns; 1740 1741 if (semid < 0) 1742 return -EINVAL; 1743 1744 switch (cmd & (~IPC_64)) { 1745 case IPC_INFO: 1746 case SEM_INFO: 1747 return semctl_info(ns, semid, cmd, p); 1748 case IPC_STAT: 1749 case SEM_STAT: 1750 case SEM_STAT_ANY: 1751 err = semctl_stat(ns, semid, cmd, &semid64); 1752 if (err < 0) 1753 return err; 1754 if (copy_compat_semid_to_user(p, &semid64, version)) 1755 err = -EFAULT; 1756 return err; 1757 case GETVAL: 1758 case GETPID: 1759 case GETNCNT: 1760 case GETZCNT: 1761 case GETALL: 1762 case SETALL: 1763 return semctl_main(ns, semid, semnum, cmd, p); 1764 case SETVAL: 1765 return semctl_setval(ns, semid, semnum, arg); 1766 case IPC_SET: 1767 if (copy_compat_semid_from_user(&semid64, p, version)) 1768 return -EFAULT; 1769 /* fallthru */ 1770 case IPC_RMID: 1771 return semctl_down(ns, semid, cmd, &semid64); 1772 default: 1773 return -EINVAL; 1774 } 1775 } 1776 1777 COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg) 1778 { 1779 return compat_ksys_semctl(semid, semnum, cmd, arg); 1780 } 1781 #endif 1782 1783 /* If the task doesn't already have a undo_list, then allocate one 1784 * here. We guarantee there is only one thread using this undo list, 1785 * and current is THE ONE 1786 * 1787 * If this allocation and assignment succeeds, but later 1788 * portions of this code fail, there is no need to free the sem_undo_list. 1789 * Just let it stay associated with the task, and it'll be freed later 1790 * at exit time. 1791 * 1792 * This can block, so callers must hold no locks. 1793 */ 1794 static inline int get_undo_list(struct sem_undo_list **undo_listp) 1795 { 1796 struct sem_undo_list *undo_list; 1797 1798 undo_list = current->sysvsem.undo_list; 1799 if (!undo_list) { 1800 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); 1801 if (undo_list == NULL) 1802 return -ENOMEM; 1803 spin_lock_init(&undo_list->lock); 1804 refcount_set(&undo_list->refcnt, 1); 1805 INIT_LIST_HEAD(&undo_list->list_proc); 1806 1807 current->sysvsem.undo_list = undo_list; 1808 } 1809 *undo_listp = undo_list; 1810 return 0; 1811 } 1812 1813 static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid) 1814 { 1815 struct sem_undo *un; 1816 1817 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { 1818 if (un->semid == semid) 1819 return un; 1820 } 1821 return NULL; 1822 } 1823 1824 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) 1825 { 1826 struct sem_undo *un; 1827 1828 assert_spin_locked(&ulp->lock); 1829 1830 un = __lookup_undo(ulp, semid); 1831 if (un) { 1832 list_del_rcu(&un->list_proc); 1833 list_add_rcu(&un->list_proc, &ulp->list_proc); 1834 } 1835 return un; 1836 } 1837 1838 /** 1839 * find_alloc_undo - lookup (and if not present create) undo array 1840 * @ns: namespace 1841 * @semid: semaphore array id 1842 * 1843 * The function looks up (and if not present creates) the undo structure. 1844 * The size of the undo structure depends on the size of the semaphore 1845 * array, thus the alloc path is not that straightforward. 1846 * Lifetime-rules: sem_undo is rcu-protected, on success, the function 1847 * performs a rcu_read_lock(). 1848 */ 1849 static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) 1850 { 1851 struct sem_array *sma; 1852 struct sem_undo_list *ulp; 1853 struct sem_undo *un, *new; 1854 int nsems, error; 1855 1856 error = get_undo_list(&ulp); 1857 if (error) 1858 return ERR_PTR(error); 1859 1860 rcu_read_lock(); 1861 spin_lock(&ulp->lock); 1862 un = lookup_undo(ulp, semid); 1863 spin_unlock(&ulp->lock); 1864 if (likely(un != NULL)) 1865 goto out; 1866 1867 /* no undo structure around - allocate one. */ 1868 /* step 1: figure out the size of the semaphore array */ 1869 sma = sem_obtain_object_check(ns, semid); 1870 if (IS_ERR(sma)) { 1871 rcu_read_unlock(); 1872 return ERR_CAST(sma); 1873 } 1874 1875 nsems = sma->sem_nsems; 1876 if (!ipc_rcu_getref(&sma->sem_perm)) { 1877 rcu_read_unlock(); 1878 un = ERR_PTR(-EIDRM); 1879 goto out; 1880 } 1881 rcu_read_unlock(); 1882 1883 /* step 2: allocate new undo structure */ 1884 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1885 if (!new) { 1886 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); 1887 return ERR_PTR(-ENOMEM); 1888 } 1889 1890 /* step 3: Acquire the lock on semaphore array */ 1891 rcu_read_lock(); 1892 sem_lock_and_putref(sma); 1893 if (!ipc_valid_object(&sma->sem_perm)) { 1894 sem_unlock(sma, -1); 1895 rcu_read_unlock(); 1896 kfree(new); 1897 un = ERR_PTR(-EIDRM); 1898 goto out; 1899 } 1900 spin_lock(&ulp->lock); 1901 1902 /* 1903 * step 4: check for races: did someone else allocate the undo struct? 1904 */ 1905 un = lookup_undo(ulp, semid); 1906 if (un) { 1907 kfree(new); 1908 goto success; 1909 } 1910 /* step 5: initialize & link new undo structure */ 1911 new->semadj = (short *) &new[1]; 1912 new->ulp = ulp; 1913 new->semid = semid; 1914 assert_spin_locked(&ulp->lock); 1915 list_add_rcu(&new->list_proc, &ulp->list_proc); 1916 ipc_assert_locked_object(&sma->sem_perm); 1917 list_add(&new->list_id, &sma->list_id); 1918 un = new; 1919 1920 success: 1921 spin_unlock(&ulp->lock); 1922 sem_unlock(sma, -1); 1923 out: 1924 return un; 1925 } 1926 1927 static long do_semtimedop(int semid, struct sembuf __user *tsops, 1928 unsigned nsops, const struct timespec64 *timeout) 1929 { 1930 int error = -EINVAL; 1931 struct sem_array *sma; 1932 struct sembuf fast_sops[SEMOPM_FAST]; 1933 struct sembuf *sops = fast_sops, *sop; 1934 struct sem_undo *un; 1935 int max, locknum; 1936 bool undos = false, alter = false, dupsop = false; 1937 struct sem_queue queue; 1938 unsigned long dup = 0, jiffies_left = 0; 1939 struct ipc_namespace *ns; 1940 1941 ns = current->nsproxy->ipc_ns; 1942 1943 if (nsops < 1 || semid < 0) 1944 return -EINVAL; 1945 if (nsops > ns->sc_semopm) 1946 return -E2BIG; 1947 if (nsops > SEMOPM_FAST) { 1948 sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL); 1949 if (sops == NULL) 1950 return -ENOMEM; 1951 } 1952 1953 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { 1954 error = -EFAULT; 1955 goto out_free; 1956 } 1957 1958 if (timeout) { 1959 if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 || 1960 timeout->tv_nsec >= 1000000000L) { 1961 error = -EINVAL; 1962 goto out_free; 1963 } 1964 jiffies_left = timespec64_to_jiffies(timeout); 1965 } 1966 1967 max = 0; 1968 for (sop = sops; sop < sops + nsops; sop++) { 1969 unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG); 1970 1971 if (sop->sem_num >= max) 1972 max = sop->sem_num; 1973 if (sop->sem_flg & SEM_UNDO) 1974 undos = true; 1975 if (dup & mask) { 1976 /* 1977 * There was a previous alter access that appears 1978 * to have accessed the same semaphore, thus use 1979 * the dupsop logic. "appears", because the detection 1980 * can only check % BITS_PER_LONG. 1981 */ 1982 dupsop = true; 1983 } 1984 if (sop->sem_op != 0) { 1985 alter = true; 1986 dup |= mask; 1987 } 1988 } 1989 1990 if (undos) { 1991 /* On success, find_alloc_undo takes the rcu_read_lock */ 1992 un = find_alloc_undo(ns, semid); 1993 if (IS_ERR(un)) { 1994 error = PTR_ERR(un); 1995 goto out_free; 1996 } 1997 } else { 1998 un = NULL; 1999 rcu_read_lock(); 2000 } 2001 2002 sma = sem_obtain_object_check(ns, semid); 2003 if (IS_ERR(sma)) { 2004 rcu_read_unlock(); 2005 error = PTR_ERR(sma); 2006 goto out_free; 2007 } 2008 2009 error = -EFBIG; 2010 if (max >= sma->sem_nsems) { 2011 rcu_read_unlock(); 2012 goto out_free; 2013 } 2014 2015 error = -EACCES; 2016 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) { 2017 rcu_read_unlock(); 2018 goto out_free; 2019 } 2020 2021 error = security_sem_semop(&sma->sem_perm, sops, nsops, alter); 2022 if (error) { 2023 rcu_read_unlock(); 2024 goto out_free; 2025 } 2026 2027 error = -EIDRM; 2028 locknum = sem_lock(sma, sops, nsops); 2029 /* 2030 * We eventually might perform the following check in a lockless 2031 * fashion, considering ipc_valid_object() locking constraints. 2032 * If nsops == 1 and there is no contention for sem_perm.lock, then 2033 * only a per-semaphore lock is held and it's OK to proceed with the 2034 * check below. More details on the fine grained locking scheme 2035 * entangled here and why it's RMID race safe on comments at sem_lock() 2036 */ 2037 if (!ipc_valid_object(&sma->sem_perm)) 2038 goto out_unlock_free; 2039 /* 2040 * semid identifiers are not unique - find_alloc_undo may have 2041 * allocated an undo structure, it was invalidated by an RMID 2042 * and now a new array with received the same id. Check and fail. 2043 * This case can be detected checking un->semid. The existence of 2044 * "un" itself is guaranteed by rcu. 2045 */ 2046 if (un && un->semid == -1) 2047 goto out_unlock_free; 2048 2049 queue.sops = sops; 2050 queue.nsops = nsops; 2051 queue.undo = un; 2052 queue.pid = task_tgid(current); 2053 queue.alter = alter; 2054 queue.dupsop = dupsop; 2055 2056 error = perform_atomic_semop(sma, &queue); 2057 if (error == 0) { /* non-blocking succesfull path */ 2058 DEFINE_WAKE_Q(wake_q); 2059 2060 /* 2061 * If the operation was successful, then do 2062 * the required updates. 2063 */ 2064 if (alter) 2065 do_smart_update(sma, sops, nsops, 1, &wake_q); 2066 else 2067 set_semotime(sma, sops); 2068 2069 sem_unlock(sma, locknum); 2070 rcu_read_unlock(); 2071 wake_up_q(&wake_q); 2072 2073 goto out_free; 2074 } 2075 if (error < 0) /* non-blocking error path */ 2076 goto out_unlock_free; 2077 2078 /* 2079 * We need to sleep on this operation, so we put the current 2080 * task into the pending queue and go to sleep. 2081 */ 2082 if (nsops == 1) { 2083 struct sem *curr; 2084 curr = &sma->sems[sops->sem_num]; 2085 2086 if (alter) { 2087 if (sma->complex_count) { 2088 list_add_tail(&queue.list, 2089 &sma->pending_alter); 2090 } else { 2091 2092 list_add_tail(&queue.list, 2093 &curr->pending_alter); 2094 } 2095 } else { 2096 list_add_tail(&queue.list, &curr->pending_const); 2097 } 2098 } else { 2099 if (!sma->complex_count) 2100 merge_queues(sma); 2101 2102 if (alter) 2103 list_add_tail(&queue.list, &sma->pending_alter); 2104 else 2105 list_add_tail(&queue.list, &sma->pending_const); 2106 2107 sma->complex_count++; 2108 } 2109 2110 do { 2111 queue.status = -EINTR; 2112 queue.sleeper = current; 2113 2114 __set_current_state(TASK_INTERRUPTIBLE); 2115 sem_unlock(sma, locknum); 2116 rcu_read_unlock(); 2117 2118 if (timeout) 2119 jiffies_left = schedule_timeout(jiffies_left); 2120 else 2121 schedule(); 2122 2123 /* 2124 * fastpath: the semop has completed, either successfully or 2125 * not, from the syscall pov, is quite irrelevant to us at this 2126 * point; we're done. 2127 * 2128 * We _do_ care, nonetheless, about being awoken by a signal or 2129 * spuriously. The queue.status is checked again in the 2130 * slowpath (aka after taking sem_lock), such that we can detect 2131 * scenarios where we were awakened externally, during the 2132 * window between wake_q_add() and wake_up_q(). 2133 */ 2134 error = READ_ONCE(queue.status); 2135 if (error != -EINTR) { 2136 /* 2137 * User space could assume that semop() is a memory 2138 * barrier: Without the mb(), the cpu could 2139 * speculatively read in userspace stale data that was 2140 * overwritten by the previous owner of the semaphore. 2141 */ 2142 smp_mb(); 2143 goto out_free; 2144 } 2145 2146 rcu_read_lock(); 2147 locknum = sem_lock(sma, sops, nsops); 2148 2149 if (!ipc_valid_object(&sma->sem_perm)) 2150 goto out_unlock_free; 2151 2152 error = READ_ONCE(queue.status); 2153 2154 /* 2155 * If queue.status != -EINTR we are woken up by another process. 2156 * Leave without unlink_queue(), but with sem_unlock(). 2157 */ 2158 if (error != -EINTR) 2159 goto out_unlock_free; 2160 2161 /* 2162 * If an interrupt occurred we have to clean up the queue. 2163 */ 2164 if (timeout && jiffies_left == 0) 2165 error = -EAGAIN; 2166 } while (error == -EINTR && !signal_pending(current)); /* spurious */ 2167 2168 unlink_queue(sma, &queue); 2169 2170 out_unlock_free: 2171 sem_unlock(sma, locknum); 2172 rcu_read_unlock(); 2173 out_free: 2174 if (sops != fast_sops) 2175 kvfree(sops); 2176 return error; 2177 } 2178 2179 long ksys_semtimedop(int semid, struct sembuf __user *tsops, 2180 unsigned int nsops, const struct __kernel_timespec __user *timeout) 2181 { 2182 if (timeout) { 2183 struct timespec64 ts; 2184 if (get_timespec64(&ts, timeout)) 2185 return -EFAULT; 2186 return do_semtimedop(semid, tsops, nsops, &ts); 2187 } 2188 return do_semtimedop(semid, tsops, nsops, NULL); 2189 } 2190 2191 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, 2192 unsigned int, nsops, const struct __kernel_timespec __user *, timeout) 2193 { 2194 return ksys_semtimedop(semid, tsops, nsops, timeout); 2195 } 2196 2197 #ifdef CONFIG_COMPAT_32BIT_TIME 2198 long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, 2199 unsigned int nsops, 2200 const struct compat_timespec __user *timeout) 2201 { 2202 if (timeout) { 2203 struct timespec64 ts; 2204 if (compat_get_timespec64(&ts, timeout)) 2205 return -EFAULT; 2206 return do_semtimedop(semid, tsems, nsops, &ts); 2207 } 2208 return do_semtimedop(semid, tsems, nsops, NULL); 2209 } 2210 2211 COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems, 2212 unsigned int, nsops, 2213 const struct compat_timespec __user *, timeout) 2214 { 2215 return compat_ksys_semtimedop(semid, tsems, nsops, timeout); 2216 } 2217 #endif 2218 2219 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops, 2220 unsigned, nsops) 2221 { 2222 return do_semtimedop(semid, tsops, nsops, NULL); 2223 } 2224 2225 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between 2226 * parent and child tasks. 2227 */ 2228 2229 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) 2230 { 2231 struct sem_undo_list *undo_list; 2232 int error; 2233 2234 if (clone_flags & CLONE_SYSVSEM) { 2235 error = get_undo_list(&undo_list); 2236 if (error) 2237 return error; 2238 refcount_inc(&undo_list->refcnt); 2239 tsk->sysvsem.undo_list = undo_list; 2240 } else 2241 tsk->sysvsem.undo_list = NULL; 2242 2243 return 0; 2244 } 2245 2246 /* 2247 * add semadj values to semaphores, free undo structures. 2248 * undo structures are not freed when semaphore arrays are destroyed 2249 * so some of them may be out of date. 2250 * IMPLEMENTATION NOTE: There is some confusion over whether the 2251 * set of adjustments that needs to be done should be done in an atomic 2252 * manner or not. That is, if we are attempting to decrement the semval 2253 * should we queue up and wait until we can do so legally? 2254 * The original implementation attempted to do this (queue and wait). 2255 * The current implementation does not do so. The POSIX standard 2256 * and SVID should be consulted to determine what behavior is mandated. 2257 */ 2258 void exit_sem(struct task_struct *tsk) 2259 { 2260 struct sem_undo_list *ulp; 2261 2262 ulp = tsk->sysvsem.undo_list; 2263 if (!ulp) 2264 return; 2265 tsk->sysvsem.undo_list = NULL; 2266 2267 if (!refcount_dec_and_test(&ulp->refcnt)) 2268 return; 2269 2270 for (;;) { 2271 struct sem_array *sma; 2272 struct sem_undo *un; 2273 int semid, i; 2274 DEFINE_WAKE_Q(wake_q); 2275 2276 cond_resched(); 2277 2278 rcu_read_lock(); 2279 un = list_entry_rcu(ulp->list_proc.next, 2280 struct sem_undo, list_proc); 2281 if (&un->list_proc == &ulp->list_proc) { 2282 /* 2283 * We must wait for freeary() before freeing this ulp, 2284 * in case we raced with last sem_undo. There is a small 2285 * possibility where we exit while freeary() didn't 2286 * finish unlocking sem_undo_list. 2287 */ 2288 spin_lock(&ulp->lock); 2289 spin_unlock(&ulp->lock); 2290 rcu_read_unlock(); 2291 break; 2292 } 2293 spin_lock(&ulp->lock); 2294 semid = un->semid; 2295 spin_unlock(&ulp->lock); 2296 2297 /* exit_sem raced with IPC_RMID, nothing to do */ 2298 if (semid == -1) { 2299 rcu_read_unlock(); 2300 continue; 2301 } 2302 2303 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid); 2304 /* exit_sem raced with IPC_RMID, nothing to do */ 2305 if (IS_ERR(sma)) { 2306 rcu_read_unlock(); 2307 continue; 2308 } 2309 2310 sem_lock(sma, NULL, -1); 2311 /* exit_sem raced with IPC_RMID, nothing to do */ 2312 if (!ipc_valid_object(&sma->sem_perm)) { 2313 sem_unlock(sma, -1); 2314 rcu_read_unlock(); 2315 continue; 2316 } 2317 un = __lookup_undo(ulp, semid); 2318 if (un == NULL) { 2319 /* exit_sem raced with IPC_RMID+semget() that created 2320 * exactly the same semid. Nothing to do. 2321 */ 2322 sem_unlock(sma, -1); 2323 rcu_read_unlock(); 2324 continue; 2325 } 2326 2327 /* remove un from the linked lists */ 2328 ipc_assert_locked_object(&sma->sem_perm); 2329 list_del(&un->list_id); 2330 2331 /* we are the last process using this ulp, acquiring ulp->lock 2332 * isn't required. Besides that, we are also protected against 2333 * IPC_RMID as we hold sma->sem_perm lock now 2334 */ 2335 list_del_rcu(&un->list_proc); 2336 2337 /* perform adjustments registered in un */ 2338 for (i = 0; i < sma->sem_nsems; i++) { 2339 struct sem *semaphore = &sma->sems[i]; 2340 if (un->semadj[i]) { 2341 semaphore->semval += un->semadj[i]; 2342 /* 2343 * Range checks of the new semaphore value, 2344 * not defined by sus: 2345 * - Some unices ignore the undo entirely 2346 * (e.g. HP UX 11i 11.22, Tru64 V5.1) 2347 * - some cap the value (e.g. FreeBSD caps 2348 * at 0, but doesn't enforce SEMVMX) 2349 * 2350 * Linux caps the semaphore value, both at 0 2351 * and at SEMVMX. 2352 * 2353 * Manfred <manfred@colorfullife.com> 2354 */ 2355 if (semaphore->semval < 0) 2356 semaphore->semval = 0; 2357 if (semaphore->semval > SEMVMX) 2358 semaphore->semval = SEMVMX; 2359 ipc_update_pid(&semaphore->sempid, task_tgid(current)); 2360 } 2361 } 2362 /* maybe some queued-up processes were waiting for this */ 2363 do_smart_update(sma, NULL, 0, 1, &wake_q); 2364 sem_unlock(sma, -1); 2365 rcu_read_unlock(); 2366 wake_up_q(&wake_q); 2367 2368 kfree_rcu(un, rcu); 2369 } 2370 kfree(ulp); 2371 } 2372 2373 #ifdef CONFIG_PROC_FS 2374 static int sysvipc_sem_proc_show(struct seq_file *s, void *it) 2375 { 2376 struct user_namespace *user_ns = seq_user_ns(s); 2377 struct kern_ipc_perm *ipcp = it; 2378 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); 2379 time64_t sem_otime; 2380 2381 /* 2382 * The proc interface isn't aware of sem_lock(), it calls 2383 * ipc_lock_object() directly (in sysvipc_find_ipc). 2384 * In order to stay compatible with sem_lock(), we must 2385 * enter / leave complex_mode. 2386 */ 2387 complexmode_enter(sma); 2388 2389 sem_otime = get_semotime(sma); 2390 2391 seq_printf(s, 2392 "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n", 2393 sma->sem_perm.key, 2394 sma->sem_perm.id, 2395 sma->sem_perm.mode, 2396 sma->sem_nsems, 2397 from_kuid_munged(user_ns, sma->sem_perm.uid), 2398 from_kgid_munged(user_ns, sma->sem_perm.gid), 2399 from_kuid_munged(user_ns, sma->sem_perm.cuid), 2400 from_kgid_munged(user_ns, sma->sem_perm.cgid), 2401 sem_otime, 2402 sma->sem_ctime); 2403 2404 complexmode_tryleave(sma); 2405 2406 return 0; 2407 } 2408 #endif 2409