/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(obj->mm.quirked);
	obj->mm.quirked = true;
	list_add(&obj->st_link, objects);
}

static int populate_ggtt(struct drm_i915_private *i915,
			 struct list_head *objects)
{
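	/* Fill the GGTT with as many single-page pinned objects as will fit */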
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;
	u64 size;

	count = 0;
	for (size = 0;
	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     size += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		quirk_add(obj, objects);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		count++;
	}

	bound = 0;
	unbound = 0;
	list_for_each_entry(obj, objects, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);

		if (atomic_read(&obj->bind_count))
			bound++;
		else
			unbound++;
	}
	GEM_BUG_ON(bound + unbound != count);

	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected none!\n",
		       __func__, unbound);
		return -EINVAL;
	}

	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&i915->ggtt.vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

static void unpin_ggtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;

	mutex_lock(&ggtt->vm.mutex);
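	/* Only unpin the objects we own, i.e. those marked via quirk_add() */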
	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
		if (vma->obj->mm.quirked)
			i915_vma_unpin(vma);
	mutex_unlock(&ggtt->vm.mutex);
}

static void cleanup_objects(struct drm_i915_private *i915,
			    struct list_head *list)
{
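	/* Drop the ownership quirk and our reference on every object */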
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);

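	/* Object frees are deferred to a worker; drain it outside struct_mutex */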
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

static int igt_evict_something(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something on a full GGTT returned err=%d, expected -ENOSPC\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict something */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%ld\n",
		       IS_ERR(vma) ? PTR_ERR(vma) : 0L);
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = I915_GTT_PAGE_SIZE,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d, expected -ENOSPC\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict the node */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
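	/* Intentionally a no-op: only the presence of the callback matters */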
}

static int igt_evict_for_cache_color(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Currently the use of color_adjust is limited to cache domains within
	 * the ggtt, and the presence of mm.color_adjust is assumed to mean
	 * i915_gtt_color_adjust throughout the driver, so a mock color adjust
	 * works just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded\n");
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(i915);
	cleanup_objects(i915, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
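	/* (pinned vmas are skipped, so eviction still reports success) */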
	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, i915->ggtt.vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, i915->ggtt.vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
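	/* Release the hole: roughly PRETEND_GGTT_SIZE of GGTT is now free */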
	drm_mm_remove_node(&hole);
	mutex_unlock(&i915->drm.struct_mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects, so as to force an eviction */
	for_each_engine(engine, i915, id) {
		struct i915_sw_fence fence;
		struct drm_file *file;

		file = mock_file(i915);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			break;
		}

		count = 0;
		mutex_lock(&i915->drm.struct_mutex);
		onstack_fence_init(&fence);
		do {
			struct i915_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = igt_request_alloc(ctx, engine);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
					       ctx->hw_id, engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		mutex_unlock(&i915->drm.struct_mutex);

		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		mock_file_free(i915, file);
		if (err)
			break;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

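	/* These subtests run on a mock device: no real hardware is needed */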
	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, i915);

	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}