/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

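/*
 * Run any idle barrier tasks still queued on the engine (e.g. left behind
 * after wedging): detach them from the engine and invoke their retirement
 * callbacks directly, as there is no request left to wait upon.
 */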
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct i915_active_request *active =
			container_of((struct list_head *)node,
				     typeof(*active), link);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, NULL);
	}
}

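/*
 * Final GEM bookkeeping once the GT has idled (struct_mutex held): flush
 * leftover idle barriers, release the per-engine batch pools, park the
 * timelines and VMA state, and allow the global slab caches to shrink.
 */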
static void i915_gem_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		call_idle_barriers(engine); /* cleanup after wedging */
		i915_gem_batch_pool_fini(&engine->batch_pool);
	}

	i915_timelines_park(i915);
	i915_vma_parked(i915);

	i915_globals_park();
}

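/*
 * Worker queued from the INTEL_GT_PARK notification: stop the periodic
 * retire worker and, provided the GT wakeref is still idle and this work
 * has not been requeued in the meantime, park GEM; otherwise re-arm the
 * retire worker.
 */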
static void idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.idle_work);
	bool park;

	cancel_delayed_work_sync(&i915->gem.retire_work);
	mutex_lock(&i915->drm.struct_mutex);

	intel_wakeref_lock(&i915->gt.wakeref);
	park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
	intel_wakeref_unlock(&i915->gt.wakeref);
	if (park)
		i915_gem_park(i915);
	else
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));

	mutex_unlock(&i915->drm.struct_mutex);
}

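/*
 * Periodic worker that retires completed requests roughly once a second
 * while the GPU is awake, skipping a pass if struct_mutex is contended.
 */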
static void retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&i915->drm.struct_mutex)) {
		i915_retire_requests(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	queue_delayed_work(i915->wq,
			   &i915->gem.retire_work,
			   round_jiffies_up_relative(HZ));
}

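/*
 * GT power management notifications: on unpark, unpark the globals and
 * restart request retirement; on park, defer the heavier GEM parking to
 * the idle worker.
 */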
static int pm_notifier(struct notifier_block *nb,
		       unsigned long action,
		       void *data)
{
	struct drm_i915_private *i915 =
		container_of(nb, typeof(*i915), gem.pm_notifier);

	switch (action) {
	case INTEL_GT_UNPARK:
		i915_globals_unpark();
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
		break;

	case INTEL_GT_PARK:
		queue_work(i915->wq, &i915->gem.idle_work);
		break;
	}

	return NOTIFY_OK;
}

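/*
 * Wait for the GPU to idle so that every engine settles back onto the
 * kernel context. If the wait times out, the device is declared wedged to
 * cancel outstanding work and leave the GPU quiet. Returns false if the
 * device is, or becomes, terminally wedged.
 */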
static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
{
	bool result = !i915_terminally_wedged(i915);

	do {
		if (i915_gem_wait_for_idle(i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the gpu quiet.
			 */
			i915_gem_set_wedged(i915);
			result = false;
		}
	} while (i915_retire_requests(i915) && result);

	GEM_BUG_ON(i915->gt.awake);
	return result;
}

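/**
 * i915_gem_load_power_context - settle the GPU onto the kernel context
 * @i915: the i915 device
 *
 * Used at init and resume so that the engines idle on the kernel context
 * and a coherent context image is available across low power states.
 *
 * Returns: true on success, false if the device had to be wedged.
 */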
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(i915);
}

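/**
 * i915_gem_suspend - quiesce GEM before system suspend/hibernation
 * @i915: the i915 device
 *
 * Flush outstanding work, switch away from user contexts so their images
 * are coherent in memory, stop hangcheck, drain freed objects and suspend
 * the uC.
 */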
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(i915);

	mutex_unlock(&i915->drm.struct_mutex);

	/*
	 * Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	GEM_BUG_ON(i915->gt.awake);
	flush_work(&i915->gem.idle_work);

	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

	i915_gem_drain_freed_objects(i915);

	intel_uc_suspend(i915);
}

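/* Peek at the first object on an mm list, or NULL if the list is empty. */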
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

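/**
 * i915_gem_suspend_late - final flush of object state before power off
 * @i915: the i915 device
 *
 * Walk the shrink and purge lists, flushing each object back to the GTT
 * domain so that no stray writes remain pending, then sanitize the uC and
 * the GPU so the system is left in a known default state.
 */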
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	intel_uc_sanitize(i915);
	i915_gem_sanitize(i915);
}

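/**
 * i915_gem_resume - restore GEM state after suspend
 * @i915: the i915 device
 *
 * Rebuild the GGTT mappings and fence registers, bring the GT, HW and uC
 * back up, and reload the kernel context for powersaving. On failure the
 * device is declared wedged.
 */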
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	intel_gt_resume(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	intel_uc_resume(i915);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!i915_reset_failed(i915)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		i915_gem_set_wedged(i915);
	}
	goto out_unlock;
}

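/*
 * Hook GEM into GT power management: set up the idle and retire workers
 * and register for GT park/unpark notifications.
 */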
void i915_gem_init__pm(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->gem.idle_work, idle_work_handler);
	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

	i915->gem.pm_notifier.notifier_call = pm_notifier;
	blocking_notifier_chain_register(&i915->gt.pm_notifications,
					 &i915->gem.pm_notifier);
}