// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "gt/intel_migrate.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"

#include "i915_deps.h"

#include "selftests/igt_spinner.h"

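/*
 * Fill the object with an ascending u32 pattern through a CPU map, or,
 * if @fill is false, check that a previously written pattern is still
 * intact.
 */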
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %d of %d\n", i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}

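/*
 * Create an object backed by @src, fill it from the CPU, migrate it to
 * @dst and verify the placement checks as well as the buffer content
 * after the move.
 */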
static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	GEM_BUG_ON(!src_mr);

	/* Switch object backing-store on create */
	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}

static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
}

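/*
 * One migration pass: optionally pin and unpin @vma to trigger a bind,
 * then migrate @obj to the other memory region (smem if it is currently
 * in lmem and vice versa) and verify the new backing store.
 */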
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj,
				  struct i915_vma *vma)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (vma) {
		err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
				      0UL | PIN_OFFSET_FIXED |
				      PIN_USER);
		if (err) {
			if (err != -EINTR && err != -ERESTARTSYS &&
			    err != -EDEADLK)
				pr_err("Failed to pin vma.\n");
			return err;
		}

		i915_vma_unpin(vma);
	}

	/*
	 * Migration will implicitly unbind (asynchronously) any bound
	 * vmas.
	 */
	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}

	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
		if (err) {
			pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}

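/*
 * Common test body: create a 2MiB lmem object, clear it on the GPU
 * (optionally after @deps), bounce it between smem and lmem a number of
 * times, and finally sync and verify the result. If @vm is given, a vma
 * is bound on each pass and the CPU content checks are skipped.
 */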
static int __igt_lmem_pages_migrate(struct intel_gt *gt,
				    struct i915_address_space *vm,
				    struct i915_deps *deps,
				    struct igt_spinner *spin,
				    struct dma_fence *spin_fence)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma = NULL;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	/* From LMEM to shmem and back again */

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (vm) {
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}
	}

	/* Initial GPU fill, sync, CPU initialization. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, deps,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
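		/*
		 * Keep the clear fence on the object so that subsequent
		 * migrations and CPU access are ordered after the clear.
		 */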
		if (rq) {
			err = dma_resv_reserve_fences(obj->base.resv, 1);
			if (!err)
				dma_resv_add_fence(obj->base.resv, &rq->fence,
						   DMA_RESV_USAGE_KERNEL);
			i915_request_put(rq);
		}
		if (err)
			continue;

		if (!vma) {
			err = igt_fill_check_buffer(obj, true);
			if (err)
				continue;
		}
	}
	if (err)
		goto out_put;

	/*
	 * Migrate to and from smem without explicitly syncing.
	 * Finalize with data in smem for fast readout.
	 */
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj, vma);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	if (spin) {
		if (dma_fence_is_signaled(spin_fence)) {
			pr_err("Spinner was terminated by hangcheck.\n");
			err = -EBUSY;
			goto out_unlock;
		}
		igt_spinner_end(spin);
	}

	/* Finally sync migration and check content. */
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	if (vma) {
		err = i915_vma_wait_for_bind(vma);
		if (err)
			goto out_unlock;
	} else {
		err = igt_fill_check_buffer(obj, false);
	}

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

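/*
 * Run the migration test above while also injecting simulated GPU copy
 * and allocation failures in the TTM move path, exercising the error and
 * fallback handling.
 */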
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

/*
 * This subtest checks that unbinding at migration is indeed performed
 * asynchronously. We launch a spinner and a number of migrations that
 * depend on that spinner having terminated. Before each migration we
 * bind a vma, which the migration operation should then unbind
 * asynchronously. If we are able to schedule migrations without blocking
 * while the spinner is still running, those unbinds are indeed async and
 * non-blocking.
 *
 * Note that each async bind operation waits for the previous migration
 * due to the moving fence that results from the migration.
 */
static int igt_async_migrate(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_ppgtt *ppgtt;
	struct igt_spinner spin;
	int err;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (igt_spinner_init(&spin, gt)) {
		err = -ENOMEM;
		goto out_spin;
	}

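	/*
	 * For each engine, launch a spinner and run the migration test with
	 * the spinner's request as a dependency of the initial GPU clear.
	 */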
	for_each_engine(engine, gt, id) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true
		};
		struct dma_fence *spin_fence;
		struct intel_context *ce;
		struct i915_request *rq;
		struct i915_deps deps;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out_ce;
		}

		/*
		 * Use MI_NOOP, making the spinner non-preemptible. If there
		 * is a code path where we fail the async operation because of
		 * the running spinner, we will block and fail to end the
		 * spinner, resulting in a deadlock. But with a non-
		 * preemptible spinner, hangcheck will terminate the spinner
		 * for us, and we will later detect that and fail the test.
		 */
		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		i915_deps_init(&deps, GFP_KERNEL);
		err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
		spin_fence = dma_fence_get(&rq->fence);
		i915_request_add(rq);
		if (err)
			goto out_ce;

		err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
					       spin_fence);
		i915_deps_fini(&deps);
		dma_fence_put(spin_fence);
		if (err)
			goto out_ce;
	}

out_ce:
	igt_spinner_fini(&spin);
out_spin:
	i915_vm_put(&ppgtt->vm);

	return err;
}

/*
 * Setting ASYNC_FAIL_ALLOC to 2 would simulate a memory allocation failure
 * while arming the migration error check, blocking async migration. That
 * would cause a deadlock, and hangcheck would then terminate the spinner,
 * causing the test to fail.
 */
#define ASYNC_FAIL_ALLOC 1
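/*
 * Like igt_lmem_pages_failsafe_migrate(), but with each migration
 * depending on a running spinner, checking that unbinding at migration
 * is truly asynchronous.
 */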
static int igt_lmem_async_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = igt_async_migrate(gt);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

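/*
 * Live selftest entry point; the tests are only run on devices with LMEM.
 */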
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_failsafe_migrate),
		SUBTEST(igt_lmem_async_migrate),
	};

	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}