// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "gt/intel_migrate.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"

#include "i915_deps.h"

#include "selftests/igt_spinner.h"

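/*
 * Write an ascending u32 pattern to the object through a CPU map, or
 * verify that a previously written pattern is still intact.
 */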
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %u of %u\n", i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}

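/*
 * Create an object in @src, fill it from the CPU, migrate it to @dst and
 * check that the content survived the move. While the pages are pinned,
 * also verify that the object doesn't claim to be migratable back to @src.
 */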
static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct intel_memory_region *dst_mr = i915->mm.regions[dst];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	GEM_BUG_ON(!src_mr);
	GEM_BUG_ON(!dst_mr);

	/* Switch object backing-store on create */
	obj = i915_gem_object_create_region(src_mr, dst_mr->min_page_size, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

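		/* With the pages pinned, the object must not be migratable. */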
		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}

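/* Object migration at creation time, between smem and lmem and within lmem. */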
static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM_0);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_LMEM_0);
}

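/*
 * One round of the migration ping-pong: optionally pin and unpin @vma to
 * force a bind, then migrate the object to the other memory region and
 * verify its new backing store.
 */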
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj,
				  struct i915_vma *vma)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (vma) {
		err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
				      0UL | PIN_OFFSET_FIXED |
				      PIN_USER);
		if (err) {
			if (err != -EINTR && err != -ERESTARTSYS &&
			    err != -EDEADLK)
				pr_err("Failed to pin vma.\n");
			return err;
		}

		i915_vma_unpin(vma);
	}

	/*
	 * Migration will implicitly unbind (asynchronously) any bound
	 * vmas.
	 */
	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}

	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
		if (err) {
			pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}

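/*
 * Create an lmem object, GPU-clear it (optionally behind @deps), then
 * bounce it between lmem and smem a number of times before syncing and
 * verifying the content. When called with a spinner, all GPU work is
 * chained to @spin_fence, so any blocking wait in the migration path
 * will deadlock against the spinner and be caught by hangcheck.
 */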
static int __igt_lmem_pages_migrate(struct intel_gt *gt,
				    struct i915_address_space *vm,
				    struct i915_deps *deps,
				    struct igt_spinner *spin,
				    struct dma_fence *spin_fence)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma = NULL;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	/* From LMEM to shmem and back again */

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (vm) {
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}
	}

	/* Initial GPU fill, sync, CPU initialization. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, deps,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
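		/*
		 * A request may have been emitted even if the clear failed;
		 * make sure its fence is attached to the object, or at least
		 * put, before bailing out.
		 */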
		if (rq) {
			err = dma_resv_reserve_fences(obj->base.resv, 1);
			if (!err)
				dma_resv_add_fence(obj->base.resv, &rq->fence,
						   DMA_RESV_USAGE_KERNEL);
			i915_request_put(rq);
		}
		if (err)
			continue;

		if (!vma) {
			err = igt_fill_check_buffer(obj, true);
			if (err)
				continue;
		}
	}
	if (err)
		goto out_put;

	/*
	 * Migrate to and from smem without explicitly syncing.
	 * Finalize with data in smem for fast readout.
	 */
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj, vma);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	if (spin) {
		if (dma_fence_is_signaled(spin_fence)) {
			pr_err("Spinner was terminated by hangcheck.\n");
			err = -EBUSY;
			goto out_unlock;
		}
		igt_spinner_end(spin);
	}

	/* Finally sync migration and check content. */
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	if (vma) {
		err = i915_vma_wait_for_bind(vma);
		if (err)
			goto out_unlock;
	} else {
		err = igt_fill_check_buffer(obj, false);
	}

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

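/*
 * Run the migration test with simulated GPU-copy and allocation failures,
 * which should exercise the non-GPU fallback paths.
 */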
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

/*
 * This subtest verifies that unbinding at migration is indeed performed
 * asynchronously. We launch a spinner and a number of migrations that
 * depend on the spinner having terminated. Before each migration we bind
 * a vma, which should then be asynchronously unbound by the migration
 * operation. If we are able to schedule the migrations without blocking
 * while the spinner is still running, those unbinds are indeed async and
 * non-blocking.
 *
 * Note that each async bind operation awaits the previous migration due
 * to the moving fence resulting from that migration.
 */
static int igt_async_migrate(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_ppgtt *ppgtt;
	struct igt_spinner spin;
	int err;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (igt_spinner_init(&spin, gt)) {
		err = -ENOMEM;
		goto out_spin;
	}

	for_each_engine(engine, gt, id) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true
		};
		struct dma_fence *spin_fence;
		struct intel_context *ce;
		struct i915_request *rq;
		struct i915_deps deps;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out_ce;
		}

		/*
		 * Use MI_NOOP, making the spinner non-preemptible. If there
		 * is a code path where we fail async operation due to the
		 * running spinner, we will block and fail to end the
		 * spinner resulting in a deadlock. But with a non-
		 * preemptible spinner, hangcheck will terminate the spinner
		 * for us, and we will later detect that and fail the test.
		 */
		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

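		/*
		 * Chain all migration work to the spinner, so that any
		 * synchronous wait in the migration path shows up as a
		 * hangcheck-terminated spinner.
		 */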
		i915_deps_init(&deps, GFP_KERNEL);
		err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
		spin_fence = dma_fence_get(&rq->fence);
		i915_request_add(rq);
		if (err)
			goto out_ce;

		err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
					       spin_fence);
		i915_deps_fini(&deps);
		dma_fence_put(spin_fence);
		if (err)
			goto out_ce;
	}

out_ce:
	igt_spinner_fini(&spin);
out_spin:
	i915_vm_put(&ppgtt->vm);

	return err;
}

/*
 * Setting ASYNC_FAIL_ALLOC to 2 will simulate memory allocation failure
 * while arming the migration error check, blocking async migration. This
 * will cause us to deadlock, and hangcheck will terminate the spinner,
 * causing the test to fail.
 */
#define ASYNC_FAIL_ALLOC 1
static int igt_lmem_async_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = igt_async_migrate(gt);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

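/* Live selftest entry point; all subtests require device memory (lmem). */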
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_failsafe_migrate),
		SUBTEST(igt_lmem_async_migrate),
	};

	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}