/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

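/*
 * Invoke the idle barriers that have accumulated on the engine: detach
 * each i915_active_request and call its retire callback by hand (with
 * no request attached), as it would be on normal retirement.
 */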
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct i915_active_request *active =
			container_of((struct list_head *)node,
				     typeof(*active), link);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, NULL);
	}
}

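/*
 * Deferred parking of the GEM layer once the GT has idled. Under
 * struct_mutex we flush the engine idle barriers, release the
 * per-engine batch pools and park the timelines, VMAs and i915
 * globals.
 */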
static void i915_gem_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		call_idle_barriers(engine); /* cleanup after wedging */
		i915_gem_batch_pool_fini(&engine->batch_pool);
	}

	intel_timelines_park(i915);
	i915_vma_parked(i915);

	i915_globals_park();
}

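/*
 * Worker queued when the GT parks. Stop the retire worker, then
 * recheck under the wakeref lock that the GT is still idle and no new
 * idle work is pending; only then park GEM. Otherwise re-arm the
 * retire worker and wait for the next park event.
 */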
static void idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.idle_work);
	bool park;

	cancel_delayed_work_sync(&i915->gem.retire_work);
	mutex_lock(&i915->drm.struct_mutex);

	intel_wakeref_lock(&i915->gt.wakeref);
	park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
		!work_pending(work));
	intel_wakeref_unlock(&i915->gt.wakeref);
	if (park)
		i915_gem_park(i915);
	else
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));

	mutex_unlock(&i915->drm.struct_mutex);
}

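/*
 * Periodically retire completed requests. We only trylock struct_mutex
 * so that we back off if the device is busy, and in either case
 * requeue ourselves roughly once a second.
 */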
static void retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&i915->drm.struct_mutex)) {
		i915_retire_requests(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	queue_delayed_work(i915->wq,
			   &i915->gem.retire_work,
			   round_jiffies_up_relative(HZ));
}

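/*
 * GT power management notifier: on unpark, revive the i915 globals and
 * restart the retire worker; on park, kick the idle worker to wind the
 * GEM state back down.
 */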
static int pm_notifier(struct notifier_block *nb,
		       unsigned long action,
		       void *data)
{
	struct drm_i915_private *i915 =
		container_of(nb, typeof(*i915), gem.pm_notifier);

	switch (action) {
	case INTEL_GT_UNPARK:
		i915_globals_unpark();
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
		break;

	case INTEL_GT_PARK:
		queue_work(i915->wq, &i915->gem.idle_work);
		break;
	}

	return NOTIFY_OK;
}

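/*
 * Idle the GPU on the kernel context. If waiting for the engines to
 * idle times out, declare the GPU wedged so that outstanding work is
 * cancelled and the GPU is left quiet. Repeat while retiring requests
 * uncovers more work to wait upon.
 */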
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !intel_gt_is_wedged(gt);

	do {
		if (i915_gem_wait_for_idle(gt->i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(gt->i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the gpu quiet.
			 */
			intel_gt_set_wedged(gt);
			result = false;
		}
	} while (i915_retire_requests(gt->i915) && result);

	GEM_BUG_ON(gt->awake);
	return result;
}

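/*
 * Switch back to the kernel context and idle the GPU, so that a known
 * context image is resident for powersaving; see the resume path below.
 */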
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(&i915->gt);
}

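/*
 * First phase of suspend: flush outstanding work and switch everything
 * onto the kernel context, so that all other context images are
 * coherent in memory before the system state is saved.
 */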
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(&i915->gt);

	mutex_unlock(&i915->drm.struct_mutex);

	/*
	 * Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	GEM_BUG_ON(i915->gt.awake);
	flush_work(&i915->gem.idle_work);

	cancel_delayed_work_sync(&i915->gt.hangcheck.work);

	i915_gem_drain_freed_objects(i915);

	intel_uc_suspend(&i915->gt.uc);
}

static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

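/*
 * Second phase of suspend: with the GPU already idle, flush each
 * shrinkable or purgeable object into the GTT read domain so that no
 * stray writes remain, then sanitize the GPU back to a known state
 * before the objects' backing storage is dismantled (see the comment
 * below).
 */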
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	i915_gem_sanitize(i915);
}

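/*
 * Restore GEM state after suspend/hibernation: rebuild the GTT
 * mappings and fences, reinitialise the hardware, resume the GT and
 * its microcontrollers, and reload the kernel context. On any failure
 * the GPU is declared wedged rather than left in an unknown state.
 */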
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	if (intel_gt_resume(&i915->gt))
		goto err_wedged;

	intel_uc_resume(&i915->gt.uc);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!intel_gt_is_wedged(&i915->gt)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		intel_gt_set_wedged(&i915->gt);
	}
	goto out_unlock;
}

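/*
 * One-time setup of the GEM power management hooks: the idle and
 * retire workers, and the notifier that ties them to GT park/unpark
 * events.
 */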
void i915_gem_init__pm(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->gem.idle_work, idle_work_handler);
	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

	i915->gem.pm_notifier.notifier_call = pm_notifier;
	blocking_notifier_chain_register(&i915->gt.pm_notifications,
					 &i915->gem.pm_notifier);
}