/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
 *   count_semzcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This allows FIFO ordering
 *   to be achieved without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
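
/*
 * Illustration (not kernel code): a minimal user-space sketch of the
 * semop() interface implemented below. Both operations are submitted in
 * one call and applied atomically; identifiers and values here are
 * illustrative only.
 *
 *	#include <sys/sem.h>
 *
 *	int semid = semget(IPC_PRIVATE, 2, 0600);	(two semaphores, value 0)
 *	struct sembuf ops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO },
 *		{ .sem_num = 1, .sem_op = +1, .sem_flg = SEM_UNDO },
 *	};
 *	semop(semid, ops, 2);	blocks (FIFO) until sem 0 > 0, then
 *				decrements sem 0 and increments sem 1
 */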

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore */
	time_t	sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Locking:
 *	sem_undo.id_next,
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *	sem_array.sem_undo: global sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 *	sem_array.sem_base[i].pending_{const,alter}:
 *		global or semaphore sem_lock() for read/write
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - Merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}

static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}

/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count) {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * The rules are:
	 * - optimized locking is possible if no complex operation
	 *   is either enqueued or processed right now.
	 * - The test for enqueued complex ops is simple:
	 *      sma->complex_count != 0
	 * - Testing for complex ops that are processed right now is
	 *   a bit more difficult. Complex ops acquire the full lock
	 *   and first wait until all running simple ops have completed.
	 *   (see above)
	 *   Thus: If we own a simple lock and the global lock is free
	 *	and complex_count is now 0, then it will stay 0 and
	 *	thus just locking sem->lock is sufficient.
	 */
	sem = sma->sem_base + sops->sem_num;

	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/* spin_is_locked() is not a memory barrier */
			smp_mb();

			/* Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if it is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (!ipcp->deleted)
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}

static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from the pending list
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1
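
/*
 * A condensed sketch of the protocol described above (pseudo-code; the
 * real implementations are wake_up_sem_queue_prepare(),
 * wake_up_sem_queue_do() and get_queue_result() below):
 *
 *	waker:					sleeper:
 *	unlink the queue entry			error = q->status;
 *	q->status = IN_WAKEUP;			while (error == IN_WAKEUP) {
 *	wake_up_process(q->sleeper);			cpu_relax();
 *	q->status = final return code;			error = q->status;
 *						}
 */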

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;
	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @sops: array with operations that should be checked
 * @nsops: number of operations
 * @un: undo array
 * @pid: pid that did the change
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */

static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem *curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
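
/*
 * Worked example (illustrative values): with semval[0] == 1 and
 * sops[] = { {0, -1, 0}, {0, -1, 0} }, the first decrement succeeds but
 * the second would make the result negative, so the undo loop above rolls
 * the first one back and 1 is returned (the caller must sleep): all
 * operations of one semop() take effect, or none do.
 */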

/**
 * wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head for the tasks that must be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decreased the value - thus they won't proceed either.
	 */
	return 0;
}

/**
 * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
						 q->undo, q->pid);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */

			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}

/**
 * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_wakeup_zero() checks all required queues for wait-for-zero
 * operations, based on the actual changes that were performed on the
 * semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means the modified semaphores are not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}


/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * set_semotime(sma, sops) - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
							get_seconds();
	}
}

/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrements.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}

/* The following counts are associated with each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt(struct sem_array *sma, ushort semnum)
{
	int semncnt;
	struct sem_queue *q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
		struct sembuf *sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
			semncnt++;
	}

	list_for_each_entry(q, &sma->pending_alter, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt(struct sem_array *sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue *q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
		struct sembuf *sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
			semzcnt++;
	}

	list_for_each_entry(q, &sma->pending_const, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static time_t get_semotime(struct sem_array *sma)
{
	int i;
	time_t res;

	res = sma->sem_base[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time_t to = sma->sem_base[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	curr = &sma->sem_base[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(sma)) {
				sem_unlock(sma, -1);
				rcu_read_unlock();
				err = -EIDRM;
				goto out_free;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma, -1);
				rcu_read_unlock();
				err = -EIDRM;
				goto out_free;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			rcu_read_unlock();
			return -EIDRM;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, ipc_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma, semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma, semnum);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
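
/*
 * Illustration (not kernel code): typical user-space semctl() usage.
 * Per semctl(2), the caller must define union semun itself; the values
 * below are illustrative.
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	union semun arg;
 *	arg.val = 1;
 *	semctl(semid, 0, SETVAL, arg);		(handled by semctl_setval())
 *	int v = semctl(semid, 0, GETVAL);	(handled by semctl_main())
 *	semctl(semid, 0, IPC_RMID);		(handled by semctl_down())
 */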

/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}


/**
 * get_queue_result - Retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}
1753 
1754 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1755 		unsigned, nsops, const struct timespec __user *, timeout)
1756 {
1757 	int error = -EINVAL;
1758 	struct sem_array *sma;
1759 	struct sembuf fast_sops[SEMOPM_FAST];
1760 	struct sembuf* sops = fast_sops, *sop;
1761 	struct sem_undo *un;
1762 	int undos = 0, alter = 0, max, locknum;
1763 	struct sem_queue queue;
1764 	unsigned long jiffies_left = 0;
1765 	struct ipc_namespace *ns;
1766 	struct list_head tasks;
1767 
1768 	ns = current->nsproxy->ipc_ns;
1769 
1770 	if (nsops < 1 || semid < 0)
1771 		return -EINVAL;
1772 	if (nsops > ns->sc_semopm)
1773 		return -E2BIG;
1774 	if(nsops > SEMOPM_FAST) {
1775 		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
1776 		if(sops==NULL)
1777 			return -ENOMEM;
1778 	}
1779 	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
1780 		error=-EFAULT;
1781 		goto out_free;
1782 	}
1783 	if (timeout) {
1784 		struct timespec _timeout;
1785 		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1786 			error = -EFAULT;
1787 			goto out_free;
1788 		}
1789 		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1790 			_timeout.tv_nsec >= 1000000000L) {
1791 			error = -EINVAL;
1792 			goto out_free;
1793 		}
1794 		jiffies_left = timespec_to_jiffies(&_timeout);
1795 	}
1796 	max = 0;
1797 	for (sop = sops; sop < sops + nsops; sop++) {
1798 		if (sop->sem_num >= max)
1799 			max = sop->sem_num;
1800 		if (sop->sem_flg & SEM_UNDO)
1801 			undos = 1;
1802 		if (sop->sem_op != 0)
1803 			alter = 1;
1804 	}
1805 
1806 	INIT_LIST_HEAD(&tasks);
1807 
1808 	if (undos) {
1809 		/* On success, find_alloc_undo takes the rcu_read_lock */
1810 		un = find_alloc_undo(ns, semid);
1811 		if (IS_ERR(un)) {
1812 			error = PTR_ERR(un);
1813 			goto out_free;
1814 		}
1815 	} else {
1816 		un = NULL;
1817 		rcu_read_lock();
1818 	}
1819 
1820 	sma = sem_obtain_object_check(ns, semid);
1821 	if (IS_ERR(sma)) {
1822 		rcu_read_unlock();
1823 		error = PTR_ERR(sma);
1824 		goto out_free;
1825 	}
1826 
1827 	error = -EFBIG;
1828 	if (max >= sma->sem_nsems)
1829 		goto out_rcu_wakeup;
1830 
1831 	error = -EACCES;
1832 	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1833 		goto out_rcu_wakeup;
1834 
1835 	error = security_sem_semop(sma, sops, nsops, alter);
1836 	if (error)
1837 		goto out_rcu_wakeup;
1838 
1839 	/*
1840 	 * semid identifiers are not unique - find_alloc_undo may have
1841 	 * allocated an undo structure, it was invalidated by an RMID
1842 	 * and now a new array with received the same id. Check and fail.
1843 	 * This case can be detected checking un->semid. The existence of
1844 	 * "un" itself is guaranteed by rcu.
1845 	 */
1846 	error = -EIDRM;
1847 	locknum = sem_lock(sma, sops, nsops);
1848 	if (un && un->semid == -1)
1849 		goto out_unlock_free;
1850 
1851 	error = perform_atomic_semop(sma, sops, nsops, un,
1852 					task_tgid_vnr(current));
1853 	if (error == 0) {
1854 		/* If the operation was successful, then do
1855 		 * the required updates.
1856 		 */
1857 		if (alter)
1858 			do_smart_update(sma, sops, nsops, 1, &tasks);
1859 		else
1860 			set_semotime(sma, sops);
1861 	}
1862 	if (error <= 0)
1863 		goto out_unlock_free;
1864 
1865 	/* We need to sleep on this operation, so we put the current
1866 	 * task into the pending queue and go to sleep.
1867 	 */
1868 
1869 	queue.sops = sops;
1870 	queue.nsops = nsops;
1871 	queue.undo = un;
1872 	queue.pid = task_tgid_vnr(current);
1873 	queue.alter = alter;
1874 
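	/*
	 * Descriptive note: a single-sop operation can wait on the
	 * per-semaphore list, unless complex operations are already
	 * pending (complex_count != 0), in which case it must join the
	 * global per-array list so that FIFO ordering is preserved.
	 */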
1875 	if (nsops == 1) {
1876 		struct sem *curr;
1877 		curr = &sma->sem_base[sops->sem_num];
1878 
1879 		if (alter) {
1880 			if (sma->complex_count) {
1881 				list_add_tail(&queue.list,
1882 						&sma->pending_alter);
1883 			} else {
1885 				list_add_tail(&queue.list,
1886 						&curr->pending_alter);
1887 			}
1888 		} else {
1889 			list_add_tail(&queue.list, &curr->pending_const);
1890 		}
1891 	} else {
1892 		if (!sma->complex_count)
1893 			merge_queues(sma);
1894 
1895 		if (alter)
1896 			list_add_tail(&queue.list, &sma->pending_alter);
1897 		else
1898 			list_add_tail(&queue.list, &sma->pending_const);
1899 
1900 		sma->complex_count++;
1901 	}
1902 
1903 	queue.status = -EINTR;
1904 	queue.sleeper = current;
1905 
1906 sleep_again:
1907 	current->state = TASK_INTERRUPTIBLE;
1908 	sem_unlock(sma, locknum);
1909 	rcu_read_unlock();
1910 
1911 	if (timeout)
1912 		jiffies_left = schedule_timeout(jiffies_left);
1913 	else
1914 		schedule();
1915 
1916 	error = get_queue_result(&queue);
1917 
1918 	if (error != -EINTR) {
1919 		/* fast path: update_queue already obtained all requested
1920 		 * resources.
1921 	 * Perform an smp_mb(): user space could assume that semop()
1922 	 * is a memory barrier; without the barrier, the CPU could
1923 	 * speculatively read stale user space data that was
1924 	 * overwritten by the previous owner of the semaphore.
1925 		 */
1926 		smp_mb();
1927 
1928 		goto out_free;
1929 	}
1930 
1931 	rcu_read_lock();
1932 	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
1933 
1934 	/*
1935 	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
1936 	 */
1937 	error = get_queue_result(&queue);
1938 
1939 	/*
1940 	 * Array removed? If yes, leave without sem_unlock().
1941 	 */
1942 	if (IS_ERR(sma)) {
1943 		rcu_read_unlock();
1944 		goto out_free;
1945 	}
1946 
1948 	/*
1949 	 * If queue.status != -EINTR, we were woken up by another process.
1950 	 * Leave without unlink_queue(), but with sem_unlock().
1951 	 */
1952 
1953 	if (error != -EINTR)
1954 		goto out_unlock_free;
1956 
1957 	/*
1958 	 * If a signal or a timeout occurred we have to clean up the queue.
1959 	 */
1960 	if (timeout && jiffies_left == 0)
1961 		error = -EAGAIN;
1962 
1963 	/*
1964 	 * If the wakeup was spurious, just retry
1965 	 */
1966 	if (error == -EINTR && !signal_pending(current))
1967 		goto sleep_again;
1968 
1969 	unlink_queue(sma, &queue);
1970 
1971 out_unlock_free:
1972 	sem_unlock(sma, locknum);
1973 out_rcu_wakeup:
1974 	rcu_read_unlock();
1975 	wake_up_sem_queue_do(&tasks);
1976 out_free:
1977 	if (sops != fast_sops)
1978 		kfree(sops);
1979 	return error;
1980 }
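/*
 * Illustrative user-space caller (not part of the original file): one
 * "P" operation with SEM_UNDO and a 1.5s timeout. A timeout surfaces
 * as EAGAIN, matching the -EAGAIN set above when jiffies_left reaches
 * zero. The function name is hypothetical.
 */
#if 0	/* user-space example, kept out of the kernel build */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int try_down(int semid)
{
	struct sembuf sop = {
		.sem_num = 0,
		.sem_op  = -1,		/* decrement semaphore 0 */
		.sem_flg = SEM_UNDO,	/* roll the op back at process exit */
	};
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000L };

	if (semtimedop(semid, &sop, 1, &ts) == -1) {
		if (errno == EAGAIN)
			fprintf(stderr, "semaphore wait timed out\n");
		return -1;
	}
	return 0;
}
#endif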
1981 
1982 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
1983 		unsigned, nsops)
1984 {
1985 	return sys_semtimedop(semid, tsops, nsops, NULL);
1986 }
1987 
1988 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
1989  * parent and child tasks.
1990  */
1991 
1992 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
1993 {
1994 	struct sem_undo_list *undo_list;
1995 	int error;
1996 
1997 	if (clone_flags & CLONE_SYSVSEM) {
1998 		error = get_undo_list(&undo_list);
1999 		if (error)
2000 			return error;
2001 		atomic_inc(&undo_list->refcnt);
2002 		tsk->sysvsem.undo_list = undo_list;
2003 	} else
2004 		tsk->sysvsem.undo_list = NULL;
2005 
2006 	return 0;
2007 }
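/*
 * Illustrative user-space counterpart (not part of the original file):
 * a task that shares its undo list (e.g. one created with
 * CLONE_SYSVSEM) can detach it with unshare(), after which SEM_UNDO
 * adjustments it makes are private. The function name is hypothetical.
 */
#if 0	/* user-space example, kept out of the kernel build */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int detach_undo_list(void)
{
	if (unshare(CLONE_SYSVSEM) == -1) {
		perror("unshare(CLONE_SYSVSEM)");
		return -1;
	}
	return 0;
}
#endif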
2008 
2009 /*
2010  * add semadj values to semaphores, free undo structures.
2011  * undo structures are not freed when semaphore arrays are destroyed
2012  * so some of them may be out of date.
2013  * IMPLEMENTATION NOTE: There is some confusion over whether the
2014  * set of adjustments that needs to be done should be done in an atomic
2015  * manner or not. That is, if we are attempting to decrement the semval,
2016  * should we queue up and wait until we can do so legally?
2017  * The original implementation attempted to do this (queue and wait).
2018  * The current implementation does not do so. The POSIX standard
2019  * and SVID should be consulted to determine what behavior is mandated.
2020  */
2021 void exit_sem(struct task_struct *tsk)
2022 {
2023 	struct sem_undo_list *ulp;
2024 
2025 	ulp = tsk->sysvsem.undo_list;
2026 	if (!ulp)
2027 		return;
2028 	tsk->sysvsem.undo_list = NULL;
2029 
2030 	if (!atomic_dec_and_test(&ulp->refcnt))
2031 		return;
2032 
2033 	for (;;) {
2034 		struct sem_array *sma;
2035 		struct sem_undo *un;
2036 		struct list_head tasks;
2037 		int semid, i;
2038 
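		/*
		 * Peel off the first undo entry under RCU; an empty
		 * ulp->list_proc shows up as semid == -1 below and
		 * terminates the loop.
		 */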
2039 		rcu_read_lock();
2040 		un = list_entry_rcu(ulp->list_proc.next,
2041 				    struct sem_undo, list_proc);
2042 		if (&un->list_proc == &ulp->list_proc)
2043 			semid = -1;
2044 		else
2045 			semid = un->semid;
2046 
2047 		if (semid == -1) {
2048 			rcu_read_unlock();
2049 			break;
2050 		}
2051 
2052 		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
2053 		/* exit_sem raced with IPC_RMID, nothing to do */
2054 		if (IS_ERR(sma)) {
2055 			rcu_read_unlock();
2056 			continue;
2057 		}
2058 
2059 		sem_lock(sma, NULL, -1);
2060 		un = __lookup_undo(ulp, semid);
2061 		if (un == NULL) {
2062 			/* exit_sem raced with IPC_RMID+semget() that created
2063 			 * exactly the same semid. Nothing to do.
2064 			 */
2065 			sem_unlock(sma, -1);
2066 			rcu_read_unlock();
2067 			continue;
2068 		}
2069 
2070 		/* remove un from the linked lists */
2071 		ipc_assert_locked_object(&sma->sem_perm);
2072 		list_del(&un->list_id);
2073 
2074 		spin_lock(&ulp->lock);
2075 		list_del_rcu(&un->list_proc);
2076 		spin_unlock(&ulp->lock);
2077 
2078 		/* perform adjustments registered in un */
2079 		for (i = 0; i < sma->sem_nsems; i++) {
2080 			struct sem *semaphore = &sma->sem_base[i];
2081 			if (un->semadj[i]) {
2082 				semaphore->semval += un->semadj[i];
2083 				/*
2084 				 * Range checks of the new semaphore value,
2085 				 * not defined by sus:
2086 				 * - Some unices ignore the undo entirely
2087 				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2088 				 * - some cap the value (e.g. FreeBSD caps
2089 				 *   at 0, but doesn't enforce SEMVMX)
2090 				 *
2091 				 * Linux caps the semaphore value, both at 0
2092 				 * and at SEMVMX.
2093 				 *
2094 				 * 	Manfred <manfred@colorfullife.com>
2095 				 */
2096 				if (semaphore->semval < 0)
2097 					semaphore->semval = 0;
2098 				if (semaphore->semval > SEMVMX)
2099 					semaphore->semval = SEMVMX;
2100 				semaphore->sempid = task_tgid_vnr(current);
2101 			}
2102 		}
2103 		/* maybe some queued-up processes were waiting for this */
2104 		INIT_LIST_HEAD(&tasks);
2105 		do_smart_update(sma, NULL, 0, 1, &tasks);
2106 		sem_unlock(sma, -1);
2107 		rcu_read_unlock();
2108 		wake_up_sem_queue_do(&tasks);
2109 
2110 		kfree_rcu(un, rcu);
2111 	}
2112 	kfree(ulp);
2113 }
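/*
 * Worked example of the clamping in exit_sem() (illustrative, not part
 * of the original file): applying a stale semadj of -4 to a semaphore
 * whose value is 1 would yield -3; Linux clamps the result to 0 rather
 * than sleeping or failing, and likewise caps it at SEMVMX from above.
 */
#if 0	/* illustrative only, not compiled */
static int apply_semadj(int semval, int semadj)
{
	semval += semadj;
	if (semval < 0)
		semval = 0;		/* cap at 0 ... */
	if (semval > SEMVMX)
		semval = SEMVMX;	/* ... and at SEMVMX */
	return semval;			/* apply_semadj(1, -4) == 0 */
}
#endif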
2114 
2115 #ifdef CONFIG_PROC_FS
2116 static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2117 {
2118 	struct user_namespace *user_ns = seq_user_ns(s);
2119 	struct sem_array *sma = it;
2120 	time_t sem_otime;
2121 
2122 	/*
2123 	 * The proc interface isn't aware of sem_lock(), it calls
2124 	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2125 	 * In order to stay compatible with sem_lock(), we must wait until
2126 	 * all simple semop() calls have left their critical regions.
2127 	 */
2128 	sem_wait_array(sma);
2129 
2130 	sem_otime = get_semotime(sma);
2131 
2132 	return seq_printf(s,
2133 			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2134 			  sma->sem_perm.key,
2135 			  sma->sem_perm.id,
2136 			  sma->sem_perm.mode,
2137 			  sma->sem_nsems,
2138 			  from_kuid_munged(user_ns, sma->sem_perm.uid),
2139 			  from_kgid_munged(user_ns, sma->sem_perm.gid),
2140 			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
2141 			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
2142 			  sem_otime,
2143 			  sma->sem_ctime);
2144 }
2145 #endif
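/*
 * Illustrative reader (not part of the original file): the seq_printf()
 * format above emits one row per array in /proc/sysvipc/sem with the
 * columns key, semid, perms, nsems, uid, gid, cuid, cgid, otime and
 * ctime. The function name is hypothetical.
 */
#if 0	/* user-space example, kept out of the kernel build */
#include <stdio.h>

int dump_sem_stats(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif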
2146