/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(obj->mm.quirked);
	obj->mm.quirked = true;
	list_add(&obj->st_link, objects);
}

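/*
 * Fill the GGTT with as many single-page objects as will fit, pinning each
 * one so that it cannot be evicted behind our back, and stopping on the
 * first -ENOSPC. Afterwards verify that every object we own is indeed
 * bound into the GGTT.
 */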
static int populate_ggtt(struct drm_i915_private *i915,
			 struct list_head *objects)
{
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;

	count = 0;
	do {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			if (vma == ERR_PTR(-ENOSPC))
				break;

			return PTR_ERR(vma);
		}

		quirk_add(obj, objects);
		count++;
	} while (1);
	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
		 count, i915->ggtt.vm.total / PAGE_SIZE);

	bound = 0;
	unbound = 0;
	list_for_each_entry(obj, objects, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);

		if (atomic_read(&obj->bind_count))
			bound++;
		else
			unbound++;
	}
	GEM_BUG_ON(bound + unbound != count);

	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected 0!\n",
		       __func__, unbound);
		return -EINVAL;
	}

	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&i915->ggtt.vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

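/* Release our pin on every vma whose object we own (marked by quirk_add) */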
static void unpin_ggtt(struct drm_i915_private *i915)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
		if (vma->obj->mm.quirked)
			i915_vma_unpin(vma);
}

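/*
 * Drop the ownership marker and our reference on each object, then flush
 * the background worker that actually frees them.
 */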
static void cleanup_objects(struct drm_i915_private *i915,
			    struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}

	i915_gem_drain_freed_objects(i915);
}

static int igt_evict_something(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict something */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

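	/* With the GGTT already full, this final pin must fail with -ENOSPC */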
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (vma != ERR_PTR(-ENOSPC)) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n",
		       (int)PTR_ERR(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = 4096,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict the node */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

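/*
 * A no-op color_adjust callback: installing it is enough to mark the mock
 * GGTT as using cache coloring (see the comment in igt_evict_for_cache_color).
 */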
static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
}

static int igt_evict_for_cache_color(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/*
	 * Currently the use of color_adjust for the GGTT is limited to cache
	 * coloring and guard pages, and so the presence of mm.color_adjust for
	 * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
	 * color adjust will work just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;
	GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(i915);
	cleanup_objects(i915, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

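	/* Everything is unpinned, we should be able to evict the whole vm */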
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	mutex_lock(&i915->ggtt.vm.mutex);
	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, i915->ggtt.vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

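		/* Don't allocate under the mutex; GFP_KERNEL may want to reclaim */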
		mutex_unlock(&i915->ggtt.vm.mutex);
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		mutex_lock(&i915->ggtt.vm.mutex);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, i915->ggtt.vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
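	/* Return the hole, leaving just PRETEND_GGTT_SIZE of usable space */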
	drm_mm_remove_node(&hole);
	mutex_unlock(&i915->ggtt.vm.mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, i915, id) {
		struct i915_sw_fence fence;
		struct drm_file *file;

		file = mock_file(i915);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			break;
		}

		count = 0;
		onstack_fence_init(&fence);
		do {
			struct i915_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				break;
			}

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = igt_request_alloc(ctx, engine);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (on %s): %d\n",
					       engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		mock_file_free(i915, file);
		if (err)
			break;
	}

	mutex_lock(&i915->ggtt.vm.mutex);
out_locked:
	if (igt_flush_test(i915))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err;
}

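/* These tests need no hardware, so they run on a mock device */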
int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

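/*
 * igt_evict_contexts() exercises real request submission, so it can only
 * run as a live selftest on actual hardware.
 */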
int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}