1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6 
7 #include "gem/i915_gem_pm.h"
8 #include "gem/i915_gem_ttm_pm.h"
9 #include "gt/intel_gt.h"
10 #include "gt/intel_gt_pm.h"
11 #include "gt/intel_gt_requests.h"
12 
13 #include "i915_drv.h"
14 
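/*
 * wbinvd is an x86-only instruction for writing back and invalidating all
 * CPU caches; on other architectures we can only warn that the flush is
 * missing.
 */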
15 #if defined(CONFIG_X86)
16 #include <asm/smp.h>
17 #else
18 #define wbinvd_on_all_cpus() \
19 	pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
20 #endif
21 
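/**
 * i915_gem_suspend - prepare GEM for system suspend
 * @i915: i915 device instance
 *
 * Flush outstanding user activity, switch away from the executing contexts
 * so that their images are written back to memory, and drain the
 * freed-object queue. i915_gem_suspend_late() completes the sequence.
 */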
22 void i915_gem_suspend(struct drm_i915_private *i915)
23 {
24 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
25 
26 	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
27 	flush_workqueue(i915->wq);
28 
29 	/*
30 	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
32 	 * context image is coherent, we have to switch away from it. That
33 	 * leaves the i915->kernel_context still active when
34 	 * we actually suspend, and its image in memory may not match the GPU
35 	 * state. Fortunately, the kernel_context is disposable and we do
36 	 * not rely on its state.
37 	 */
38 	intel_gt_suspend_prepare(&i915->gt);
39 
40 	i915_gem_drain_freed_objects(i915);
41 }
42 
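/* Restore the backed-up contents of all local memory (LMEM) regions. */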
43 static int lmem_restore(struct drm_i915_private *i915, u32 flags)
44 {
45 	struct intel_memory_region *mr;
46 	int ret = 0, id;
47 
48 	for_each_memory_region(mr, i915, id) {
49 		if (mr->type == INTEL_MEMORY_LOCAL) {
50 			ret = i915_ttm_restore_region(mr, flags);
51 			if (ret)
52 				break;
53 		}
54 	}
55 
56 	return ret;
57 }
58 
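/* Back up the contents of all local memory (LMEM) regions to system memory. */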
59 static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
60 {
61 	struct intel_memory_region *mr;
62 	int ret = 0, id;
63 
64 	for_each_memory_region(mr, i915, id) {
65 		if (mr->type == INTEL_MEMORY_LOCAL) {
66 			ret = i915_ttm_backup_region(mr, flags);
67 			if (ret)
68 				break;
69 		}
70 	}
71 
72 	return ret;
73 }
74 
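/* Free any backups left behind by a failed or aborted suspend. */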
75 static void lmem_recover(struct drm_i915_private *i915)
76 {
77 	struct intel_memory_region *mr;
78 	int id;
79 
80 	for_each_memory_region(mr, i915, id)
81 		if (mr->type == INTEL_MEMORY_LOCAL)
82 			i915_ttm_recover_region(mr);
83 }
84 
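/**
 * i915_gem_backup_suspend - back up local memory contents and suspend GEM
 * @i915: i915 device instance
 *
 * Copy the contents of local memory to system memory in several passes, so
 * that buffers survive the device losing power across suspend. Any backups
 * already taken are freed again on error.
 *
 * Return: 0 on success, negative error code on failure.
 */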
85 int i915_gem_backup_suspend(struct drm_i915_private *i915)
86 {
87 	int ret;
88 
89 	/* Opportunistically try to evict unpinned objects */
90 	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
91 	if (ret)
92 		goto out_recover;
93 
94 	i915_gem_suspend(i915);
95 
96 	/*
97 	 * More objects may have become unpinned as requests were
98 	 * retired. Now try to evict again. The gt may be wedged here
99 	 * in which case we automatically fall back to memcpy.
	 * We also allow backing up pinned objects that have not been
	 * marked for early recovery, and that may contain, for example,
102 	 * page-tables for the migrate context.
103 	 */
104 	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
105 			   I915_TTM_BACKUP_PINNED);
106 	if (ret)
107 		goto out_recover;
108 
109 	/*
110 	 * Remaining objects are backed up using memcpy once we've stopped
111 	 * using the migrate context.
112 	 */
113 	ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
114 	if (ret)
115 		goto out_recover;
116 
117 	return 0;
118 
119 out_recover:
120 	lmem_recover(i915);
121 
122 	return ret;
123 }
124 
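/**
 * i915_gem_suspend_late - complete GEM suspend
 * @i915: i915 device instance
 *
 * Park the GT for good, then move all shrinkable objects into the CPU write
 * domain, flushing the CPU caches if any object was not CPU-cache coherent.
 */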
125 void i915_gem_suspend_late(struct drm_i915_private *i915)
126 {
127 	struct drm_i915_gem_object *obj;
128 	struct list_head *phases[] = {
129 		&i915->mm.shrink_list,
130 		&i915->mm.purge_list,
131 		NULL
132 	}, **phase;
133 	unsigned long flags;
134 	bool flush = false;
135 
136 	/*
	 * Neither the BIOS, ourselves nor any other kernel
138 	 * expects the system to be in execlists mode on startup,
139 	 * so we need to reset the GPU back to legacy mode. And the only
140 	 * known way to disable logical contexts is through a GPU reset.
141 	 *
142 	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we
144 	 * clean up the GEM state tracking, flushing off the requests and
145 	 * leaving the system in a known idle state.
146 	 *
	 * Note that it is of the utmost importance that the GPU is idle and
148 	 * all stray writes are flushed *before* we dismantle the backing
149 	 * storage for the pinned objects.
150 	 *
151 	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't, just in case it leaves the
153 	 * machine in an unusable condition.
154 	 */
155 
156 	intel_gt_suspend_late(&i915->gt);
157 
158 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
159 	for (phase = phases; *phase; phase++) {
160 		list_for_each_entry(obj, *phase, mm.link) {
161 			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
162 				flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
163 			__start_cpu_write(obj); /* presume auto-hibernate */
164 		}
165 	}
166 	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
167 	if (flush)
168 		wbinvd_on_all_cpus();
169 }
170 
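/**
 * i915_gem_freeze - hibernation freeze callback
 * @i915: i915 device instance
 *
 * Return: 0 (cannot fail at present).
 */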
171 int i915_gem_freeze(struct drm_i915_private *i915)
172 {
	/*
	 * Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
176 	i915_gem_shrink_all(i915);
177 
178 	return 0;
179 }
180 
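/**
 * i915_gem_freeze_late - late hibernation freeze callback
 * @i915: i915 device instance
 *
 * Return: 0 (cannot fail at present).
 */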
181 int i915_gem_freeze_late(struct drm_i915_private *i915)
182 {
183 	struct drm_i915_gem_object *obj;
184 	intel_wakeref_t wakeref;
185 
186 	/*
187 	 * Called just before we write the hibernation image.
188 	 *
189 	 * We need to update the domain tracking to reflect that the CPU
190 	 * will be accessing all the pages to create and restore from the
191 	 * hibernation, and so upon restoration those pages will be in the
192 	 * CPU domain.
193 	 *
194 	 * To make sure the hibernation image contains the latest state,
195 	 * we update that state just before writing out the image.
196 	 *
	 * To try to reduce the hibernation image, we manually shrink
	 * the objects as well; see i915_gem_freeze().
199 	 */
200 
201 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
202 		i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
203 	i915_gem_drain_freed_objects(i915);
204 
205 	wbinvd_on_all_cpus();
206 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
207 		__start_cpu_write(obj);
208 
209 	return 0;
210 }
211 
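/**
 * i915_gem_resume - restore GEM state on resume
 * @i915: i915 device instance
 *
 * Restore the objects marked for early recovery (e.g. the page-tables for
 * the migrate context) using memcpy, bring up the GT, and then restore the
 * remaining local memory contents with the GPU allowed to assist.
 */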
212 void i915_gem_resume(struct drm_i915_private *i915)
213 {
214 	int ret;
215 
216 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
217 
218 	ret = lmem_restore(i915, 0);
219 	GEM_WARN_ON(ret);
220 
221 	/*
222 	 * As we didn't flush the kernel context before suspend, we cannot
223 	 * guarantee that the context image is complete. So let's just reset
224 	 * it and start again.
225 	 */
226 	intel_gt_resume(&i915->gt);
227 
228 	ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
229 	GEM_WARN_ON(ret);
230 }
231