/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "igt_gem_utils.h"
#include "lib_sw_fence.h"
#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(obj->mm.quirked);
	obj->mm.quirked = true;
	list_add(&obj->st_link, objects);
}

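/*
 * Fill the GGTT with as many single-page objects as will fit, pinning each
 * into the global GTT, then verify that every object we own appears on the
 * bound list and none on the unbound list.
 */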
static int populate_ggtt(struct drm_i915_private *i915,
			 struct list_head *objects)
{
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;
	u64 size;

	count = 0;
	for (size = 0;
	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     size += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		quirk_add(obj, objects);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		count++;
	}

	unbound = 0;
	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
		if (obj->mm.quirked)
			unbound++;
	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected 0!\n",
		       __func__, unbound);
		return -EINVAL;
	}

	bound = 0;
	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
		if (obj->mm.quirked)
			bound++;
	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&i915->ggtt.vm.bound_list)) {
		pr_err("No vma on the GGTT bound list!\n");
		return -EINVAL;
	}

	return 0;
}

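/* Release the pin on every vma we own, i.e. those marked by quirk_add(). */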
static void unpin_ggtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;

	mutex_lock(&ggtt->vm.mutex);
	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
		if (vma->obj->mm.quirked)
			i915_vma_unpin(vma);
	mutex_unlock(&ggtt->vm.mutex);
}

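/*
 * Drop our ownership of every object on the list, then drop struct_mutex so
 * that the deferred free worker can actually release them before we return.
 */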
static void cleanup_objects(struct drm_i915_private *i915,
			    struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

static int igt_evict_something(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something on a full GGTT returned err=%d, expected -ENOSPC\n",
		       err);
		if (!err)
			err = -EINVAL;
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict something */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = 4096,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d, expected -ENOSPC\n",
		       err);
		if (!err)
			err = -EINVAL;
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict the node */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

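/*
 * A deliberate no-op: the eviction code only checks whether a color_adjust
 * callback is installed when deciding to honour node colors, so an empty
 * callback is enough to exercise the cache-coloring paths.
 */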
static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
}

static int igt_evict_for_cache_color(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Currently the use of color_adjust is limited to cache domains within
	 * the ggtt, and the presence of mm.color_adjust is assumed to mean
	 * i915_gtt_color_adjust throughout the driver, so a mock color adjust
	 * works just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded\n");
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(i915);
	cleanup_objects(i915, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm after unpinning returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, i915->ggtt.vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, i915->ggtt.vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&i915->drm.struct_mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, i915, id) {
		struct i915_sw_fence fence;
		struct drm_file *file;

		file = mock_file(i915);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			break;
		}

		count = 0;
		mutex_lock(&i915->drm.struct_mutex);
		onstack_fence_init(&fence);
		do {
			struct i915_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = igt_request_alloc(ctx, engine);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
					       ctx->hw_id, engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		mutex_unlock(&i915->drm.struct_mutex);

		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		mock_file_free(i915, file);
		if (err)
			break;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

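/* Run the eviction selftests against a mock (software-only) device. */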
int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	with_intel_runtime_pm(i915, wakeref)
		err = i915_subtests(tests, i915);

	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}

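/* igt_evict_contexts needs real requests and contexts, hence a live test. */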
int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}