// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
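
/*
 * A minimal usage sketch (not part of this file, names are hypothetical):
 * a semaphore is initialized with sema_init() to the number of tasks that
 * may hold it concurrently, which becomes the initial ->count.
 *
 *	#include <linux/semaphore.h>
 *
 *	static struct semaphore foo_sem;
 *
 *	static int __init foo_init(void)
 *	{
 *		sema_init(&foo_sem, 2);		// up to two concurrent holders
 *		return 0;
 *	}
 */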

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void __sched down(struct semaphore *sem)
{
	unsigned long flags;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
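
/*
 * A hedged usage sketch (hypothetical names): down()/up() bracketing
 * access to a fixed-size resource pool.  Pairing is purely by convention;
 * nothing ties a particular up() to a particular down().
 *
 *	static struct semaphore pool_sem;	// sema_init(&pool_sem, NR_SLOTS)
 *
 *	static void use_slot(void)
 *	{
 *		down(&pool_sem);		// may sleep; not signal-aware
 *		// ... use one slot of the pool ...
 *		up(&pool_sem);
 *	}
 */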

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int __sched down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
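
/*
 * A hedged sketch (hypothetical names): callers typically propagate the
 * -EINTR from down_interruptible() back towards user space, often as
 * -ERESTARTSYS from an ioctl/read path, rather than swallowing it.
 *
 *	static long foo_do_locked_work(struct foo_dev *dev)
 *	{
 *		if (down_interruptible(&dev->sem))
 *			return -ERESTARTSYS;	// a signal arrived while sleeping
 *		// ... critical work ...
 *		up(&dev->sem);
 *		return 0;
 *	}
 */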

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int __sched down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
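
/*
 * A hedged sketch (hypothetical names): down_killable() sleeps like
 * down_interruptible(), but only fatal signals (e.g. SIGKILL) wake it,
 * which suits long waits that should not abort on ordinary signals.
 *
 *	if (down_killable(&dev->sem))
 *		return -EINTR;		// task is being killed; back out cleanly
 */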

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int __sched down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
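
/*
 * A hedged sketch of the inverted return value (hypothetical names):
 * 0 means "got it", non-zero means "did not get it", which is the
 * opposite of mutex_trylock() and spin_trylock().
 *
 *	if (down_trylock(&dev->sem))
 *		return -EBUSY;		// semaphore NOT acquired
 *	// ... semaphore held here ...
 *	up(&dev->sem);
 */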

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing, in jiffies
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int __sched down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
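
/*
 * A hedged sketch (hypothetical names): the timeout is in jiffies, so
 * callers usually convert from milliseconds first.
 *
 *	if (down_timeout(&dev->sem, msecs_to_jiffies(500)))
 *		return -ETIME;		// not released within ~500ms
 *	// ... semaphore held here ...
 *	up(&dev->sem);
 */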

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void __sched up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
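
/*
 * A hedged sketch (hypothetical names): because up() may be called from
 * any context, a semaphore initialized to 0 can act as a simple signal,
 * e.g. an interrupt handler waking a sleeping thread (completions are
 * usually preferred for new code of this kind).
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *dev = data;
 *
 *		up(&dev->done_sem);		// never sleeps
 *		return IRQ_HANDLED;
 *	}
 *
 *	// waiter side, in process context:
 *	down(&dev->done_sem);
 */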

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	int ret;

	trace_contention_begin(sem, 0);
	ret = ___down_common(sem, state, timeout);
	trace_contention_end(sem, ret);

	return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}