/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "lib_sw_fence.h"
#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(obj->mm.quirked);
	obj->mm.quirked = true;
	list_add(&obj->st_link, objects);
}

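/*
 * Fill every page of the GGTT with a pinned, page-sized internal object,
 * then sanity check the bookkeeping: none of our objects should be left
 * on the unbound list, and all of them should show up on the bound list.
 * The pins are left held; unpin_ggtt() releases them later.
 */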
static int populate_ggtt(struct drm_i915_private *i915,
			 struct list_head *objects)
{
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;
	u64 size;

	count = 0;
	for (size = 0;
	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     size += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		quirk_add(obj, objects);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		count++;
	}

	unbound = 0;
	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
		if (obj->mm.quirked)
			unbound++;
	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected none!\n",
		       __func__, unbound);
		return -EINVAL;
	}

	bound = 0;
	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
		if (obj->mm.quirked)
			bound++;
	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&i915->ggtt.vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

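/* Drop the pin on every vma we claimed ownership of via quirk_add() */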
static void unpin_ggtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;

	mutex_lock(&ggtt->vm.mutex);
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (vma->obj->mm.quirked)
			i915_vma_unpin(vma);
	mutex_unlock(&ggtt->vm.mutex);
}

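/*
 * Release ownership of the test objects and drop our references. The
 * final frees are deferred to a worker that may itself want struct_mutex,
 * so drop the mutex while draining the freed-object list.
 */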
static void cleanup_objects(struct drm_i915_private *i915,
			    struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

static int igt_evict_something(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict something */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n",
		       (int)PTR_ERR(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = I915_GTT_PAGE_SIZE,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict the node */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

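/*
 * A deliberately empty color_adjust callback: its mere presence is what
 * matters here, as a non-NULL callback makes the eviction code compare
 * the cache colour of neighbouring nodes when checking whether a hole
 * can be reused.
 */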
static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
}

static int igt_evict_for_cache_color(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Currently the use of color_adjust is limited to cache domains within
	 * the ggtt, and so the presence of mm.color_adjust is assumed to be
	 * i915_gtt_color_adjust throughout our driver, so using a mock color
	 * adjust will work just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

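	/* Unpin the second vma so that it becomes the only eviction candidate */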
	i915_vma_unpin(vma);

	/* Remove just the second vma */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded\n");
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(i915);
	cleanup_objects(i915, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

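	/* Everything is unpinned, the eviction should now succeed */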
	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, i915->ggtt.vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, i915->ggtt.vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
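	/* Return the reserved block, leaving just enough room for a few rq */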
	drm_mm_remove_node(&hole);
	mutex_unlock(&i915->drm.struct_mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, i915, id) {
		struct i915_sw_fence fence;
		struct drm_file *file;

		file = mock_file(i915);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			break;
		}

		count = 0;
		mutex_lock(&i915->drm.struct_mutex);
		onstack_fence_init(&fence);
		do {
			struct i915_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = i915_request_alloc(engine, ctx);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
					       ctx->hw_id, engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		mutex_unlock(&i915->drm.struct_mutex);

		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		mock_file_free(i915, file);
		if (err)
			break;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

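/*
 * Entry point for the mock selftests: these run against a mock device and
 * so exercise the GGTT eviction paths without needing real hardware.
 */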
int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	with_intel_runtime_pm(i915, wakeref)
		err = i915_subtests(tests, i915);

	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}

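/*
 * Entry point for the live selftests, which require real hardware; they
 * are skipped entirely if the GPU is terminally wedged.
 */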
int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}