/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the @ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that @ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released and the acquisition unwound, and an error
 * reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter; only valid if the wakeref is already held
 * by the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref if it is already active
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
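/*
 * Illustrative usage (a minimal sketch, not taken from any driver): callers
 * typically bracket hardware access with a get/put pair on an embedded
 * struct intel_wakeref. Everything named "example_*" below is hypothetical;
 * only the intel_wakeref_*() calls come from this header.
 *
 *	static int example_do_work(struct example_engine *engine)
 *	{
 *		int err;
 *
 *		err = intel_wakeref_get(&engine->wakeref);
 *		if (err)
 *			return err;
 *
 *		err = example_touch_hw(engine);	// hypothetical HW access
 *		intel_wakeref_put(&engine->wakeref);
 *		return err;
 *	}
 */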
enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that @ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and its callbacks)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running underneath @wf->mutex, possibly
 * on another CPU) is complete.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for an earlier asynchronous release of the wakeref to complete.
 * Note that this will wait for any third party as well, so make sure you
 * only wait when you have control over the wakeref and trust no one else
 * is acquiring it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
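/*
 * Illustrative teardown sequence (a minimal sketch, not taken from any
 * driver): a context that must not sleep can drop its reference with
 * intel_wakeref_put_async(), which defers the final release to a worker,
 * and the owner flushes that deferred release before unwinding. Everything
 * named "example_*" below is hypothetical; only the intel_wakeref_*() calls
 * come from this header.
 *
 *	static void example_complete_from_irq(struct example_engine *engine)
 *	{
 *		// may be called from a context that cannot sleep
 *		intel_wakeref_put_async(&engine->wakeref);
 *	}
 *
 *	static int example_engine_fini(struct example_engine *engine)
 *	{
 *		// flush any deferred release before tearing down
 *		return intel_wakeref_wait_for_idle(&engine->wakeref);
 *	}
 */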
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */