/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct work_struct work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
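
/*
 * Example: a prospective user supplies get/put callbacks, invoked on the
 * first acquire and the final release respectively, and initialises the
 * embedded lockdep class via intel_wakeref_init(). A hypothetical sketch;
 * the my_unit names are illustrative and not part of this interface:
 *
 *	static int my_unit_get(struct intel_wakeref *wf)
 *	{
 *		return my_unit_power_on(wf_to_my_unit(wf));
 *	}
 *
 *	static int my_unit_put(struct intel_wakeref *wf)
 *	{
 *		return my_unit_power_off(wf_to_my_unit(wf));
 *	}
 *
 *	static const struct intel_wakeref_ops my_unit_ops = {
 *		.get = my_unit_get,
 *		.put = my_unit_put,
 *	};
 *
 *	intel_wakeref_init(&my_unit->wakeref, rpm, &my_unit_ops);
 */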

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the @wf->ops->get() callback
 * underneath the wakeref mutex.
 *
 * Note that @wf->ops->get() is allowed to fail, in which case the
 * runtime-pm wakeref will be released, the acquisition unwound, and an
 * error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
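
/*
 * Typical usage is to bracket HW access with a get/put pair; a minimal
 * hypothetical sketch (my_unit is illustrative):
 *
 *	int err;
 *
 *	err = intel_wakeref_get(&my_unit->wakeref);
 *	if (err)
 *		return err;
 *
 *	... touch the HW ...
 *
 *	intel_wakeref_put(&my_unit->wakeref);
 */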

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
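
/*
 * This suits opportunistic work, e.g. sampling state only while the HW is
 * already awake, without forcing a wakeup. A hypothetical sketch:
 *
 *	if (intel_wakeref_get_if_active(&my_unit->wakeref)) {
 *		... read state that is only valid while awake ...
 *		intel_wakeref_put(&my_unit->wakeref);
 *	}
 */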

#define INTEL_WAKEREF_PUT_ASYNC BIT(0)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags; pass INTEL_WAKEREF_PUT_ASYNC to defer the final
 * release to the embedded worker
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @wf->ops->put()
 * callback is called underneath the wakeref mutex.
 *
 * Note that @wf->ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
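
/*
 * intel_wakeref_put() may sleep on @wf->mutex when dropping the last
 * reference, so from atomic context (e.g. a timer or irq handler) use
 * intel_wakeref_put_async(), which punts the final release to the
 * embedded worker. A hypothetical sketch (my_unit is illustrative):
 *
 *	static void my_unit_timer(struct timer_list *t)
 *	{
 *		struct my_unit *unit = from_timer(unit, t, timer);
 *
 *		intel_wakeref_put_async(&unit->wakeref);
 *	}
 */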

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running underneath @wf->mutex, possibly
 * on another CPU) has completed, including the deferred worker.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_work(&wf->work);
}
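
/*
 * This serves, for example, as a barrier before retiring state touched by
 * the put callback; a hypothetical sketch:
 *
 *	intel_wakeref_unlock_wait(&my_unit->wakeref);
 *	... the put callback and its deferred worker are now quiescent ...
 */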

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
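
/*
 * Holding the mutex prevents the final release from running, so the
 * active state can be sampled and acted upon without it vanishing
 * underneath us. A hypothetical sketch:
 *
 *	intel_wakeref_lock(&my_unit->wakeref);
 *	if (intel_wakeref_is_active(&my_unit->wakeref))
 *		... inspect state that is only valid while active ...
 *	intel_wakeref_unlock(&my_unit->wakeref);
 */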

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 *
 * While holding @wf->mutex during the final release, restore a single
 * reference on the wakeref so that it remains active, deferring the park
 * until that reference is in turn released.
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for an earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, or a negative error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
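
/*
 * For example, a hypothetical teardown path might flush an earlier
 * intel_wakeref_put_async() before releasing the wakeref's owner:
 *
 *	err = intel_wakeref_wait_for_idle(&my_unit->wakeref);
 *	if (err)
 *		return err;
 */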

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
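
/*
 * For example, a hypothetical caller expecting the HW to be needed again
 * shortly might extend the grace period, and later cancel it by suspending
 * immediately:
 *
 *	intel_wakeref_auto(&my_unit->userfault_wakeref, msecs_to_jiffies(250));
 *	...
 *	intel_wakeref_auto(&my_unit->userfault_wakeref, 0);
 */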

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */