/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply
 *   be redone. So the current implementation is o.K.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better, but only get those semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the arrival of the O(1) scheduler, it becomes unnecessary to perform
 * the check/retry algorithm for waking up blocked processes, as the new
 * scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>

#include <asm/uaccess.h>
#include "util.h"

#define sem_ids(ns)	(*((ns)->ids[IPC_SEM_IDS]))

#define sem_lock(ns, id)	((struct sem_array*)ipc_lock(&sem_ids(ns), id))
#define sem_unlock(sma)		ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(ns, id)	((struct sem_array*)ipc_rmid(&sem_ids(ns), id))
#define sem_checkid(ns, sma, semid)	\
	ipc_checkid(&sem_ids(ns),&sma->sem_perm,semid)
#define sem_buildid(ns, id, seq) \
	ipc_buildid(&sem_ids(ns), id, seq)

static struct ipc_ids init_sem_ids;

static int newary(struct ipc_namespace *, key_t, int, int);
static void freeary(struct ipc_namespace *ns, struct sem_array *sma, int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

static void __ipc_init __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SEM_IDS] = ids;
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(ids, ns->sc_semmni);
}

#ifdef CONFIG_IPC_NS
int sem_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__sem_init_ns(ns, ids);
	return 0;
}

void sem_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct sem_array *sma;

	mutex_lock(&sem_ids(ns).mutex);
	for (i = 0; i <= sem_ids(ns).max_id; i++) {
		sma = sem_lock(ns, i);
		if (sma == NULL)
			continue;

		freeary(ns, sma, i);
	}
	mutex_unlock(&sem_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SEM_IDS]);
	kfree(ns->ids[IPC_SEM_IDS]);
	ns->ids[IPC_SEM_IDS] = NULL;
}
#endif

void __init sem_init (void)
{
	__sem_init_ns(&init_ipc_ns, &init_sem_ids);
	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
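
/*
 * Illustrative sketch (commentary only, not compiled): the two sides of the
 * handshake described above, reduced to their essentials. The real waker
 * side lives in update_queue() and freeary(), the real waiter side in
 * sys_semtimedop() further down in this file.
 *
 * Waker, with the semaphore spinlock held:
 *
 *	remove_from_queue(sma, q);
 *	q->status = IN_WAKEUP;		(result is imminent)
 *	wake_up_process(q->sleeper);
 *	smp_wmb();
 *	q->status = error;		(final value; q may vanish right after)
 *
 * Waiter, after returning from schedule(), without taking the spinlock:
 *
 *	error = queue.status;
 *	while (error == IN_WAKEUP) {
 *		cpu_relax();
 *		error = queue.status;
 *	}
 *	(if error != -EINTR the operation was already completed or rejected
 *	 by the waker, so the waiter can return without touching the array)
 */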
#define IN_WAKEUP	1

static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if(id == -1) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return -ENOSPC;
	}
	ns->used_sems += nsems;

	sma->sem_id = sem_buildid(ns, id, sma->sem_perm.seq);
	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_id;
}

asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;
	mutex_lock(&sem_ids(ns).mutex);

	if (key == IPC_PRIVATE) {
		err = newary(ns, key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids(ns), key)) == -1) { /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(ns, key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = sem_lock(ns, id);
		BUG_ON(sma==NULL);
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else {
			int semid = sem_buildid(ns, id, sma->sem_perm.seq);
			err = security_sem_associate(sma, semflg);
			if (!err)
				err = semid;
		}
		sem_unlock(sma);
	}

	mutex_unlock(&sem_ids(ns).mutex);
	return err;
}

/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}
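
/*
 * Note on the representation used by the three helpers above: q->prev does
 * not point at the previous queue element but at whichever "next" pointer
 * currently points at q, i.e. either &sma->sem_pending (for the head) or
 * &prev_element->next. That is why unlinking is just "*(q->prev) = q->next",
 * and why the empty list is sem_pending == NULL with
 * sem_pending_last == &sem_pending.
 *
 * For example, after append_to_queue() on an empty list:
 *	sma->sem_pending      == q
 *	sma->sem_pending_last == &q->next
 *	q->prev               == &sma->sem_pending
 *	q->next               == NULL
 */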
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}

/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	q = sma->sem_pending;
	while(q) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			struct sem_queue *n;
			remove_from_queue(sma,q);
			q->status = IN_WAKEUP;
			/*
			 * Continue scanning. The next operation
			 * that must be checked depends on the type of the
			 * completed operation:
			 * - if the operation modified the array, then
			 *   restart from the head of the queue and
			 *   check for threads that might be waiting
			 *   for semaphore values to become 0.
			 * - if the operation didn't modify the array,
			 *   then just continue.
			 */
			if (q->alter)
				n = sma->sem_pending;
			else
				n = q->next;
			wake_up_process(q->sleeper);
			/* hands-off: q will disappear immediately after
			 * writing q->status.
			 */
			smp_wmb();
			q->status = error;
			q = n;
		} else {
			q = q->next;
		}
	}
}

/* The following counts are associated with each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}
static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
 * the spinlock for this semaphore set held. sem_ids.mutex remains locked
 * on exit.
 */
static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id)
{
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	q = sma->sem_pending;
	while(q) {
		struct sem_queue *n;
		/* lazy remove_from_queue: we are killing the whole queue */
		q->prev = NULL;
		n = q->next;
		q->status = IN_WAKEUP;
		wake_up_process(q->sleeper); /* doesn't sleep */
		smp_wmb();
		q->status = -EIDRM;	/* hands-off q */
		q = n;
	}

	/* Remove the semaphore set from the ID array */
	sma = sem_rmid(ns, id);
	sem_unlock(sma);

	ns->used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime = in->sem_otime;
		out.sem_ctime = in->sem_ctime;
		out.sem_nsems = in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		mutex_lock(&sem_ids(ns).mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids(ns).max_id;
		mutex_unlock(&sem_ids(ns).mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if(semid >= sem_ids(ns).entries->size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		sma = sem_lock(ns, semid);
		if(sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sem_buildid(ns, semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(ns, semid);
	if(sma==NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err=-EIDRM;
	if (sem_checkid(ns,sma,semid))
		goto out_unlock;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			ipc_rcu_getref(sma);
			sem_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}

			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		ipc_rcu_getref(sma);
		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			sem_unlock(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->tgid;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

struct sem_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid = tbuf.sem_perm.uid;
		out->gid = tbuf.sem_perm.gid;
		out->mode = tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.sem_perm.uid;
		out->gid = tbuf_old.sem_perm.gid;
		out->mode = tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock(ns, semid);
	if(sma==NULL)
		return -EINVAL;

	if (sem_checkid(ns,sma,semid)) {
		err=-EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock;

	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock;
	}
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err=-EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(ns, sma, semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(ns,semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		mutex_lock(&sem_ids(ns).mutex);
		err = semctl_down(ns,semid,semnum,cmd,version,arg);
		mutex_unlock(&sem_ids(ns).mutex);
		return err;
	default:
		return -EINVAL;
	}
}
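
/*
 * Userspace view, for orientation (illustration only, not kernel code).
 * semctl(2) callers on Linux must define union semun themselves; the sketch
 * below assumes such a definition and shows commands that end up in
 * semctl_main() and semctl_down() above:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	union semun arg = { .val = 1 };
 *	semctl(id, 0, SETVAL, arg);		(handled by semctl_main)
 *	int v = semctl(id, 0, GETVAL);		(v is now 1)
 *	semctl(id, 0, IPC_RMID);		(handled by semctl_down)
 */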

static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_lock(&undo_list->lock);
}

/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates task3
 * and once again shares the sem_undo_list, the sem_undo_list will still be
 * locked, and future SEM_UNDO operations will deadlock. This case is
 * dealt with in copy_semundo() by having it reinitialize the spin lock when
 * the refcnt goes from 1 to 2.
 */
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_unlock(&undo_list->lock);
}


/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while(un != NULL) {
		if(un->semid==semid)
			break;
		if(un->semid==-1) {
			*last=un->proc_next;
			kfree(un);
		} else {
			last=&un->proc_next;
		}
		un=*last;
	}
	return un;
}

static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock(ns, semid);
	un = ERR_PTR(-EINVAL);
	if(sma==NULL)
		goto out;
	un = ERR_PTR(-EIDRM);
	if (sem_checkid(ns,sma,semid)) {
		sem_unlock(sma);
		goto out;
	}
	nsems = sma->sem_nsems;
	ipc_rcu_getref(sma);
	sem_unlock(sma);

	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		return ERR_PTR(-ENOMEM);
	}
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un) {
		unlock_semundo();
		kfree(new);
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		goto out;
	}
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	un = new;
	unlock_semundo();
out:
	return un;
}
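
/*
 * Note on the pattern used in find_undo() above (and in the GETALL/SETALL
 * paths of semctl_main()): the array spinlock may not be held across a
 * GFP_KERNEL allocation, so the code takes a reference with ipc_rcu_getref(),
 * drops the lock, allocates, then relocks with ipc_lock_by_ptr() and drops
 * the reference again. Because the array may have been removed by IPC_RMID
 * in the meantime, sem_perm.deleted must be rechecked before the result is
 * used; otherwise the caller would operate on a dead semaphore set.
 */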

asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
			unsigned nsops, const struct timespec __user *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

retry_undos:
	if (undos) {
		un = find_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock(ns, semid);
	error=-EINVAL;
	if(sma==NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(ns,sma,semid))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array received the same id. Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
	if (error <= 0) {
		if (alter && error == 0)
			update_queue (sma);
		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->tgid;
	queue.id = semid;
	queue.alter = alter;
	if (alter)
		append_to_queue(sma ,&queue);
	else
		prepend_to_queue(sma ,&queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while(unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(ns, semid);
	if(sma==NULL) {
		BUG_ON(queue.prev != NULL);
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma,&queue);
	goto out_unlock_free;

out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}

asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
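
/*
 * Userspace view, for orientation (illustration only, not kernel code):
 * a classic P/V pair on semaphore 0 of an existing set, with SEM_UNDO so
 * that exit_sem() below reverses the decrement if the task dies while
 * holding the "lock":
 *
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	struct sembuf up   = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };
 *
 *	semop(semid, &down, 1);		(blocks until semval > 0, then decrements)
 *	... critical section ...
 *	semop(semid, &up, 1);		(increments and wakes a waiter if any)
 *
 * semtimedop() behaves the same but gives up with EAGAIN once the timeout
 * expires.
 */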
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code. Initialize the undo_list->lock here instead of get_undo_list()
 * because of the reasoning in the comment above unlock_semundo.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;
	struct ipc_namespace *ns;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	ns = tsk->nsproxy->ipc_ns;
	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;

		if(semid == -1)
			continue;
		sma = sem_lock(ns, semid);
		if (sma == NULL)
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(ns,sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (u->semadj[i]) {
				semaphore->semval += u->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = current->tgid;
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif