/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
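 *
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * assumes a semaphore "my_sem" initialised elsewhere, e.g. with sema_init()):
 *
 *	if (down_killable(&my_sem))
 *		return -EINTR;
 *	... touch the state protected by my_sem ...
 *	up(&my_sem);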
 */
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @jiffies: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long jiffies)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, jiffies);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
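 *
 * As an illustration (how the folding is expected to play out, not a
 * guarantee from any particular compiler): __down() below passes the
 * constants TASK_UNINTERRUPTIBLE and MAX_SCHEDULE_TIMEOUT, so
 * signal_pending_state() is known to be false at compile time and the
 * "interrupted" path can be discarded entirely.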
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, task))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_task_state(task, state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}
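
/*
 * A minimal usage sketch of the API implemented above (hypothetical driver
 * code, not part of this file; "foo_sem" and foo_do_work() are made-up
 * names for illustration):
 *
 *	static DEFINE_SEMAPHORE(foo_sem);
 *
 *	static int foo_do_work(void)
 *	{
 *		if (down_interruptible(&foo_sem))
 *			return -EINTR;
 *		... work on the data guarded by foo_sem ...
 *		up(&foo_sem);
 *		return 0;
 *	}
 */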