/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

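/*
 * Selftests for the GGTT eviction paths: i915_gem_evict_something(),
 * i915_gem_evict_for_node() and i915_gem_evict_vm(). The helpers below
 * fill the GGTT with pinned objects so that each eviction path can be
 * exercised in both its failure (everything pinned) and success
 * (everything unpinned) modes.
 */
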
static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(obj->mm.quirked);
	obj->mm.quirked = true;
	list_add(&obj->st_link, objects);
}

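/*
 * Fill the GGTT with as many single-page internal objects as will fit,
 * pinning each one and recording it on @objects via quirk_add(). Stops at
 * the first -ENOSPC and then verifies that every tracked object is indeed
 * bound.
 */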
static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;

	count = 0;
	do {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			if (vma == ERR_PTR(-ENOSPC))
				break;

			return PTR_ERR(vma);
		}

		quirk_add(obj, objects);
		count++;
	} while (1);
	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
		 count, ggtt->vm.total / PAGE_SIZE);

	bound = 0;
	unbound = 0;
	list_for_each_entry(obj, objects, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);

		if (atomic_read(&obj->bind_count))
			bound++;
		else
			unbound++;
	}
	GEM_BUG_ON(bound + unbound != count);

	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected none!\n",
		       __func__, unbound);
		return -EINVAL;
	}

	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&ggtt->vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

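/* Release the pin on every quirked vma so that it becomes evictable */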
static void unpin_ggtt(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (vma->obj->mm.quirked)
			i915_vma_unpin(vma);
}

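/*
 * Drop ownership of (and the reference to) every tracked object, then
 * wait for the freed objects to be reaped before returning.
 */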
static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}

	i915_gem_drain_freed_objects(ggtt->vm.i915);
}

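/*
 * Check both outcomes of i915_gem_evict_something(): it must report
 * -ENOSPC while every vma is pinned, and succeed once they are unpinned.
 */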
static int igt_evict_something(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something on a full GGTT returned err=%d, expected -ENOSPC\n",
		       err);
		if (!err)
			err = -EINVAL;
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	/* Everything is unpinned, we should be able to evict something */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

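/*
 * Check that pinning one more object than fits in the GGTT reports
 * -ENOSPC instead of evicting a pinned neighbour to make room.
 */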
static int igt_overcommit(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (vma != ERR_PTR(-ENOSPC)) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n",
		       (int)PTR_ERR_OR_ZERO(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

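/*
 * As igt_evict_something(), but targeting a fixed range at the start of
 * the GGTT with i915_gem_evict_for_node().
 */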
static int igt_evict_for_vma(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = I915_GTT_PAGE_SIZE,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d, expected -ENOSPC\n",
		       err);
		if (!err)
			err = -EINVAL;
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	/* Everything is unpinned, we should be able to evict the node */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

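/*
 * A no-op colour adjustment; merely installing a callback is enough for
 * the vm to report i915_vm_has_cache_coloring() during the test below.
 */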
static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
}

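/*
 * Check that i915_gem_evict_for_node() honours cache colouring: making
 * room for a node whose colour conflicts with a pinned neighbour must
 * fail, even though the slot itself is empty.
 */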
static int igt_evict_for_cache_color(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/*
	 * Currently the use of color_adjust for the GGTT is limited to cache
	 * coloring and guard pages, and so the presence of mm.color_adjust for
	 * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
	 * color adjust will work just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;
	GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded\n");
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(ggtt);
	cleanup_objects(ggtt, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

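/*
 * i915_gem_evict_vm() should skip pinned vma rather than fail, and should
 * then evict everything once the pins are dropped.
 */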
static int igt_evict_vm(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

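/*
 * Shrink the usable GGTT with unevictable reservations, then spam
 * requests until allocating another context forces an eviction; the
 * igt_evict_ctl.fail_if_busy knob turns a blocking eviction into -EBUSY
 * so the test can detect it.
 */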
static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_gtt_insert(&ggtt->vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, ggtt->vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		mutex_unlock(&ggtt->vm.mutex);
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		mutex_lock(&ggtt->vm.mutex);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, ggtt->vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects so as to force an eviction */
	for_each_engine(engine, gt, id) {
		struct i915_sw_fence fence;
		struct file *file;

		file = mock_file(i915);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			break;
		}

		count = 0;
		onstack_fence_init(&fence);
		do {
			struct i915_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = igt_request_alloc(ctx, engine);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (on %s): %d\n",
					       engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		fput(file);
		if (err)
			break;
	}

	mutex_lock(&ggtt->vm.mutex);
out_locked:
	if (igt_flush_test(i915))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err;
}

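/*
 * These subtests run against a mock device and need no hardware; with
 * CONFIG_DRM_I915_SELFTEST enabled they are typically invoked via the
 * i915.mock_selftests=-1 module parameter.
 */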
int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, &i915->gt);

	drm_dev_put(&i915->drm);
	return err;
}

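/*
 * The context eviction test touches real engines, so it runs as a live
 * selftest (i915.live_selftests) and is skipped on a wedged GT.
 */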
int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}