/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/stackdepot.h>

struct drm_i915_private;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;
	intel_wakeref_t wakeref;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf) do {					\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), &__key);				\
} while (0)

int __intel_wakeref_get_first(struct drm_i915_private *i915,
			      struct intel_wakeref *wf,
			      int (*fn)(struct intel_wakeref *wf));
int __intel_wakeref_put_last(struct drm_i915_private *i915,
			     struct intel_wakeref *wf,
			     int (*fn)(struct intel_wakeref *wf));

/**
 * intel_wakeref_get: Acquire the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @fn: callback for acquiring the wakeref, called only on first acquire
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the @fn underneath the wakeref
 * mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * will be released, the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct drm_i915_private *i915,
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(i915, wf, fn);

	return 0;
}

/**
 * intel_wakeref_put: Release the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @fn: callback for releasing the wakeref, called only on final release
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @fn callback is called
 * underneath the wakeref mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * is retained and an error reported.
 *
 * Returns: 0 if the wakeref was released successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_put(struct drm_i915_private *i915,
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
		return __intel_wakeref_put_last(i915, wf, fn);

	return 0;
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it from being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_active(struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

#endif /* INTEL_WAKEREF_H */
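
/*
 * Usage sketch (illustrative only, not part of this header): a hypothetical
 * caller embeds a struct intel_wakeref and pairs intel_wakeref_get() with
 * intel_wakeref_put(), supplying first-get/last-put callbacks that run under
 * wf->mutex. The names my_engine, example_* and my_engine_power_{on,off}()
 * below are invented for this sketch.
 *
 *	struct my_engine {
 *		struct drm_i915_private *i915;
 *		struct intel_wakeref wakeref;
 *	};
 *
 *	static int example_first_get(struct intel_wakeref *wf)
 *	{
 *		struct my_engine *me =
 *			container_of(wf, struct my_engine, wakeref);
 *
 *		return my_engine_power_on(me); (runtime-pm wakeref already held)
 *	}
 *
 *	static int example_last_put(struct intel_wakeref *wf)
 *	{
 *		struct my_engine *me =
 *			container_of(wf, struct my_engine, wakeref);
 *
 *		return my_engine_power_off(me); (runtime-pm wakeref dropped after)
 *	}
 *
 *	static int example_use(struct my_engine *me)
 *	{
 *		int err;
 *
 *		intel_wakeref_init(&me->wakeref);
 *
 *		err = intel_wakeref_get(me->i915, &me->wakeref,
 *					example_first_get);
 *		if (err)
 *			return err;
 *
 *		(use the hardware while the wakeref count is elevated)
 *
 *		return intel_wakeref_put(me->i915, &me->wakeref,
 *					 example_last_put);
 *	}
 */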