1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 
32 #include "i915_random.h"
33 #include "i915_selftest.h"
34 
35 #include "mock_drm.h"
36 #include "mock_gem_device.h"
37 #include "igt_flush_test.h"
38 
39 static void cleanup_freed_objects(struct drm_i915_private *i915)
40 {
41 	i915_gem_drain_freed_objects(i915);
42 }
43 
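/*
 * The "fake" backing store below never touches real memory for the object
 * contents: every scatterlist entry points at the same PFN_BIAS page and the
 * DMA address is simply that page's physical address. This lets the tests
 * exercise GTT allocation and binding of very large objects without needing
 * an equivalent amount of system RAM.
 */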
44 static void fake_free_pages(struct drm_i915_gem_object *obj,
45 			    struct sg_table *pages)
46 {
47 	sg_free_table(pages);
48 	kfree(pages);
49 }
50 
51 static int fake_get_pages(struct drm_i915_gem_object *obj)
52 {
53 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
54 #define PFN_BIAS 0x1000
55 	struct sg_table *pages;
56 	struct scatterlist *sg;
57 	unsigned int sg_page_sizes;
58 	typeof(obj->base.size) rem;
59 
60 	pages = kmalloc(sizeof(*pages), GFP);
61 	if (!pages)
62 		return -ENOMEM;
63 
64 	rem = round_up(obj->base.size, BIT(31)) >> 31;
65 	if (sg_alloc_table(pages, rem, GFP)) {
66 		kfree(pages);
67 		return -ENOMEM;
68 	}
69 
70 	sg_page_sizes = 0;
71 	rem = obj->base.size;
72 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
73 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
74 
75 		GEM_BUG_ON(!len);
76 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
77 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
78 		sg_dma_len(sg) = len;
79 		sg_page_sizes |= len;
80 
81 		rem -= len;
82 	}
83 	GEM_BUG_ON(rem);
84 
85 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
86 
87 	return 0;
88 #undef GFP
89 }
90 
91 static void fake_put_pages(struct drm_i915_gem_object *obj,
92 			   struct sg_table *pages)
93 {
94 	fake_free_pages(obj, pages);
95 	obj->mm.dirty = false;
96 }
97 
98 static const struct drm_i915_gem_object_ops fake_ops = {
99 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
100 	.get_pages = fake_get_pages,
101 	.put_pages = fake_put_pages,
102 };
103 
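/*
 * Create an object of the requested size backed by the fake scatterlist
 * machinery above, pinning its pages once up front so that the "backing
 * storage" is preallocated before the object is handed back to the caller.
 */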
104 static struct drm_i915_gem_object *
105 fake_dma_object(struct drm_i915_private *i915, u64 size)
106 {
107 	static struct lock_class_key lock_class;
108 	struct drm_i915_gem_object *obj;
109 
110 	GEM_BUG_ON(!size);
111 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
112 
113 	if (overflows_type(size, obj->base.size))
114 		return ERR_PTR(-E2BIG);
115 
116 	obj = i915_gem_object_alloc();
117 	if (!obj)
118 		goto err;
119 
120 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
121 	i915_gem_object_init(obj, &fake_ops, &lock_class);
122 
123 	i915_gem_object_set_volatile(obj);
124 
125 	obj->write_domain = I915_GEM_DOMAIN_CPU;
126 	obj->read_domains = I915_GEM_DOMAIN_CPU;
127 	obj->cache_level = I915_CACHE_NONE;
128 
129 	/* Preallocate the "backing storage" */
130 	if (i915_gem_object_pin_pages(obj))
131 		goto err_obj;
132 
133 	i915_gem_object_unpin_pages(obj);
134 	return obj;
135 
136 err_obj:
137 	i915_gem_object_put(obj);
138 err:
139 	return ERR_PTR(-ENOMEM);
140 }
141 
142 static int igt_ppgtt_alloc(void *arg)
143 {
144 	struct drm_i915_private *dev_priv = arg;
145 	struct i915_ppgtt *ppgtt;
146 	u64 size, last, limit;
147 	int err = 0;
148 
149 	/* Allocate a ppgtt and try to fill the entire range */
150 
151 	if (!HAS_PPGTT(dev_priv))
152 		return 0;
153 
154 	ppgtt = __ppgtt_create(dev_priv);
155 	if (IS_ERR(ppgtt))
156 		return PTR_ERR(ppgtt);
157 
158 	if (!ppgtt->vm.allocate_va_range)
159 		goto err_ppgtt_cleanup;
160 
161 	/*
162 	 * While we only allocate the page tables here, and so could address
163 	 * a much larger GTT than we could actually fit into RAM, a practical
164 	 * limit is the number of physical pages in the system. This should
165 	 * ensure that we do not run into the oom-killer during the test and
166 	 * wilfully take down the machine.
167 	 */
168 	limit = totalram_pages() << PAGE_SHIFT;
169 	limit = min(ppgtt->vm.total, limit);
170 
171 	/* Check we can allocate the entire range */
172 	for (size = 4096; size <= limit; size <<= 2) {
173 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
174 		if (err) {
175 			if (err == -ENOMEM) {
176 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
177 					size, ilog2(size));
178 				err = 0; /* virtual space too large! */
179 			}
180 			goto err_ppgtt_cleanup;
181 		}
182 
183 		cond_resched();
184 
185 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
186 	}
187 
188 	/* Check we can incrementally allocate the entire range */
189 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
190 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
191 						  last, size - last);
192 		if (err) {
193 			if (err == -ENOMEM) {
194 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
195 					last, size - last, ilog2(size));
196 				err = 0; /* virtual space too large! */
197 			}
198 			goto err_ppgtt_cleanup;
199 		}
200 
201 		cond_resched();
202 	}
203 
204 err_ppgtt_cleanup:
205 	i915_vm_put(&ppgtt->vm);
206 	return err;
207 }
208 
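/*
 * Exercise the low-level vm hooks (allocate_va_range, insert_entries and
 * clear_range) directly across a hole in the address space, using a
 * zero-initialised mock vma instead of the usual vma pin/bind paths.
 */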
209 static int lowlevel_hole(struct drm_i915_private *i915,
210 			 struct i915_address_space *vm,
211 			 u64 hole_start, u64 hole_end,
212 			 unsigned long end_time)
213 {
214 	I915_RND_STATE(seed_prng);
215 	unsigned int size;
216 	struct i915_vma mock_vma;
217 
218 	memset(&mock_vma, 0, sizeof(struct i915_vma));
219 
220 	/* Keep creating larger objects until one cannot fit into the hole */
221 	for (size = 12; (hole_end - hole_start) >> size; size++) {
222 		I915_RND_SUBSTATE(prng, seed_prng);
223 		struct drm_i915_gem_object *obj;
224 		unsigned int *order, count, n;
225 		u64 hole_size;
226 
227 		hole_size = (hole_end - hole_start) >> size;
228 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
229 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
230 		count = hole_size >> 1;
231 		if (!count) {
232 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
233 				 __func__, hole_start, hole_end, size, hole_size);
234 			break;
235 		}
236 
237 		do {
238 			order = i915_random_order(count, &prng);
239 			if (order)
240 				break;
241 		} while (count >>= 1);
242 		if (!count)
243 			return -ENOMEM;
244 		GEM_BUG_ON(!order);
245 
246 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
247 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
248 
249 		/* Ignore allocation failures (i.e. don't report them as
250 		 * a test failure) as we are purposefully allocating very
251 		 * large objects without checking that we have sufficient
252 		 * memory. We expect to hit -ENOMEM.
253 		 */
254 
255 		obj = fake_dma_object(i915, BIT_ULL(size));
256 		if (IS_ERR(obj)) {
257 			kfree(order);
258 			break;
259 		}
260 
261 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
262 
263 		if (i915_gem_object_pin_pages(obj)) {
264 			i915_gem_object_put(obj);
265 			kfree(order);
266 			break;
267 		}
268 
269 		for (n = 0; n < count; n++) {
270 			u64 addr = hole_start + order[n] * BIT_ULL(size);
271 			intel_wakeref_t wakeref;
272 
273 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
274 
275 			if (igt_timeout(end_time,
276 					"%s timed out before %d/%d\n",
277 					__func__, n, count)) {
278 				hole_end = hole_start; /* quit */
279 				break;
280 			}
281 
282 			if (vm->allocate_va_range &&
283 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
284 				break;
285 
286 			mock_vma.pages = obj->mm.pages;
287 			mock_vma.node.size = BIT_ULL(size);
288 			mock_vma.node.start = addr;
289 
290 			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
291 				vm->insert_entries(vm, &mock_vma,
292 						   I915_CACHE_NONE, 0);
293 		}
294 		count = n;
295 
296 		i915_random_reorder(order, count, &prng);
297 		for (n = 0; n < count; n++) {
298 			u64 addr = hole_start + order[n] * BIT_ULL(size);
299 			intel_wakeref_t wakeref;
300 
301 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
302 			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
303 				vm->clear_range(vm, addr, BIT_ULL(size));
304 		}
305 
306 		i915_gem_object_unpin_pages(obj);
307 		i915_gem_object_put(obj);
308 
309 		kfree(order);
310 
311 		cleanup_freed_objects(i915);
312 	}
313 
314 	return 0;
315 }
316 
317 static void close_object_list(struct list_head *objects,
318 			      struct i915_address_space *vm)
319 {
320 	struct drm_i915_gem_object *obj, *on;
321 	int ignored;
322 
323 	list_for_each_entry_safe(obj, on, objects, st_link) {
324 		struct i915_vma *vma;
325 
326 		vma = i915_vma_instance(obj, vm, NULL);
327 		if (!IS_ERR(vma))
328 			ignored = i915_vma_unbind(vma);
329 		/* Only ppgtt vma may be closed before the object is freed */
330 		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
331 			i915_vma_close(vma);
332 
333 		list_del(&obj->st_link);
334 		i915_gem_object_put(obj);
335 	}
336 }
337 
338 static int fill_hole(struct drm_i915_private *i915,
339 		     struct i915_address_space *vm,
340 		     u64 hole_start, u64 hole_end,
341 		     unsigned long end_time)
342 {
343 	const u64 hole_size = hole_end - hole_start;
344 	struct drm_i915_gem_object *obj;
345 	const unsigned long max_pages =
346 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
347 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
348 	unsigned long npages, prime, flags;
349 	struct i915_vma *vma;
350 	LIST_HEAD(objects);
351 	int err;
352 
353 	/* Try binding many VMA working inwards from either edge */
354 
355 	flags = PIN_OFFSET_FIXED | PIN_USER;
356 	if (i915_is_ggtt(vm))
357 		flags |= PIN_GLOBAL;
358 
359 	for_each_prime_number_from(prime, 2, max_step) {
360 		for (npages = 1; npages <= max_pages; npages *= prime) {
361 			const u64 full_size = npages << PAGE_SHIFT;
362 			const struct {
363 				const char *name;
364 				u64 offset;
365 				int step;
366 			} phases[] = {
367 				{ "top-down", hole_end, -1, },
368 				{ "bottom-up", hole_start, 1, },
369 				{ }
370 			}, *p;
371 
372 			obj = fake_dma_object(i915, full_size);
373 			if (IS_ERR(obj))
374 				break;
375 
376 			list_add(&obj->st_link, &objects);
377 
378 			/* Align differing sized objects against the edges, and
379 			 * check we don't walk off into the void when binding
380 			 * them into the GTT.
381 			 */
382 			for (p = phases; p->name; p++) {
383 				u64 offset;
384 
385 				offset = p->offset;
386 				list_for_each_entry(obj, &objects, st_link) {
387 					vma = i915_vma_instance(obj, vm, NULL);
388 					if (IS_ERR(vma))
389 						continue;
390 
391 					if (p->step < 0) {
392 						if (offset < hole_start + obj->base.size)
393 							break;
394 						offset -= obj->base.size;
395 					}
396 
397 					err = i915_vma_pin(vma, 0, 0, offset | flags);
398 					if (err) {
399 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
400 						       __func__, p->name, err, npages, prime, offset);
401 						goto err;
402 					}
403 
404 					if (!drm_mm_node_allocated(&vma->node) ||
405 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
406 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
407 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
408 						       offset);
409 						err = -EINVAL;
410 						goto err;
411 					}
412 
413 					i915_vma_unpin(vma);
414 
415 					if (p->step > 0) {
416 						if (offset + obj->base.size > hole_end)
417 							break;
418 						offset += obj->base.size;
419 					}
420 				}
421 
422 				offset = p->offset;
423 				list_for_each_entry(obj, &objects, st_link) {
424 					vma = i915_vma_instance(obj, vm, NULL);
425 					if (IS_ERR(vma))
426 						continue;
427 
428 					if (p->step < 0) {
429 						if (offset < hole_start + obj->base.size)
430 							break;
431 						offset -= obj->base.size;
432 					}
433 
434 					if (!drm_mm_node_allocated(&vma->node) ||
435 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
436 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
437 						       __func__, p->name, vma->node.start, vma->node.size,
438 						       offset);
439 						err = -EINVAL;
440 						goto err;
441 					}
442 
443 					err = i915_vma_unbind(vma);
444 					if (err) {
445 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
446 						       __func__, p->name, vma->node.start, vma->node.size,
447 						       err);
448 						goto err;
449 					}
450 
451 					if (p->step > 0) {
452 						if (offset + obj->base.size > hole_end)
453 							break;
454 						offset += obj->base.size;
455 					}
456 				}
457 
458 				offset = p->offset;
459 				list_for_each_entry_reverse(obj, &objects, st_link) {
460 					vma = i915_vma_instance(obj, vm, NULL);
461 					if (IS_ERR(vma))
462 						continue;
463 
464 					if (p->step < 0) {
465 						if (offset < hole_start + obj->base.size)
466 							break;
467 						offset -= obj->base.size;
468 					}
469 
470 					err = i915_vma_pin(vma, 0, 0, offset | flags);
471 					if (err) {
472 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
473 						       __func__, p->name, err, npages, prime, offset);
474 						goto err;
475 					}
476 
477 					if (!drm_mm_node_allocated(&vma->node) ||
478 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
479 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
480 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
481 						       offset);
482 						err = -EINVAL;
483 						goto err;
484 					}
485 
486 					i915_vma_unpin(vma);
487 
488 					if (p->step > 0) {
489 						if (offset + obj->base.size > hole_end)
490 							break;
491 						offset += obj->base.size;
492 					}
493 				}
494 
495 				offset = p->offset;
496 				list_for_each_entry_reverse(obj, &objects, st_link) {
497 					vma = i915_vma_instance(obj, vm, NULL);
498 					if (IS_ERR(vma))
499 						continue;
500 
501 					if (p->step < 0) {
502 						if (offset < hole_start + obj->base.size)
503 							break;
504 						offset -= obj->base.size;
505 					}
506 
507 					if (!drm_mm_node_allocated(&vma->node) ||
508 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
509 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
510 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
511 						       offset);
512 						err = -EINVAL;
513 						goto err;
514 					}
515 
516 					err = i915_vma_unbind(vma);
517 					if (err) {
518 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
519 						       __func__, p->name, vma->node.start, vma->node.size,
520 						       err);
521 						goto err;
522 					}
523 
524 					if (p->step > 0) {
525 						if (offset + obj->base.size > hole_end)
526 							break;
527 						offset += obj->base.size;
528 					}
529 				}
530 			}
531 
532 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
533 					__func__, npages, prime)) {
534 				err = -EINTR;
535 				goto err;
536 			}
537 		}
538 
539 		close_object_list(&objects, vm);
540 		cleanup_freed_objects(i915);
541 	}
542 
543 	return 0;
544 
545 err:
546 	close_object_list(&objects, vm);
547 	return err;
548 }
549 
550 static int walk_hole(struct drm_i915_private *i915,
551 		     struct i915_address_space *vm,
552 		     u64 hole_start, u64 hole_end,
553 		     unsigned long end_time)
554 {
555 	const u64 hole_size = hole_end - hole_start;
556 	const unsigned long max_pages =
557 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
558 	unsigned long flags;
559 	u64 size;
560 
561 	/* Try binding a single VMA in different positions within the hole */
562 
563 	flags = PIN_OFFSET_FIXED | PIN_USER;
564 	if (i915_is_ggtt(vm))
565 		flags |= PIN_GLOBAL;
566 
567 	for_each_prime_number_from(size, 1, max_pages) {
568 		struct drm_i915_gem_object *obj;
569 		struct i915_vma *vma;
570 		u64 addr;
571 		int err = 0;
572 
573 		obj = fake_dma_object(i915, size << PAGE_SHIFT);
574 		if (IS_ERR(obj))
575 			break;
576 
577 		vma = i915_vma_instance(obj, vm, NULL);
578 		if (IS_ERR(vma)) {
579 			err = PTR_ERR(vma);
580 			goto err_put;
581 		}
582 
583 		for (addr = hole_start;
584 		     addr + obj->base.size < hole_end;
585 		     addr += obj->base.size) {
586 			err = i915_vma_pin(vma, 0, 0, addr | flags);
587 			if (err) {
588 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
589 				       __func__, addr, vma->size,
590 				       hole_start, hole_end, err);
591 				goto err_close;
592 			}
593 			i915_vma_unpin(vma);
594 
595 			if (!drm_mm_node_allocated(&vma->node) ||
596 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
597 				pr_err("%s incorrect at %llx + %llx\n",
598 				       __func__, addr, vma->size);
599 				err = -EINVAL;
600 				goto err_close;
601 			}
602 
603 			err = i915_vma_unbind(vma);
604 			if (err) {
605 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
606 				       __func__, addr, vma->size, err);
607 				goto err_close;
608 			}
609 
610 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
611 
612 			if (igt_timeout(end_time,
613 					"%s timed out at %llx\n",
614 					__func__, addr)) {
615 				err = -EINTR;
616 				goto err_close;
617 			}
618 		}
619 
620 err_close:
621 		if (!i915_vma_is_ggtt(vma))
622 			i915_vma_close(vma);
623 err_put:
624 		i915_gem_object_put(obj);
625 		if (err)
626 			return err;
627 
628 		cleanup_freed_objects(i915);
629 	}
630 
631 	return 0;
632 }
633 
634 static int pot_hole(struct drm_i915_private *i915,
635 		    struct i915_address_space *vm,
636 		    u64 hole_start, u64 hole_end,
637 		    unsigned long end_time)
638 {
639 	struct drm_i915_gem_object *obj;
640 	struct i915_vma *vma;
641 	unsigned long flags;
642 	unsigned int pot;
643 	int err = 0;
644 
645 	flags = PIN_OFFSET_FIXED | PIN_USER;
646 	if (i915_is_ggtt(vm))
647 		flags |= PIN_GLOBAL;
648 
649 	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
650 	if (IS_ERR(obj))
651 		return PTR_ERR(obj);
652 
653 	vma = i915_vma_instance(obj, vm, NULL);
654 	if (IS_ERR(vma)) {
655 		err = PTR_ERR(vma);
656 		goto err_obj;
657 	}
658 
659 	/* Insert a pair of pages across every pot boundary within the hole */
660 	for (pot = fls64(hole_end - 1) - 1;
661 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
662 	     pot--) {
663 		u64 step = BIT_ULL(pot);
664 		u64 addr;
665 
666 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
667 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
668 		     addr += step) {
669 			err = i915_vma_pin(vma, 0, 0, addr | flags);
670 			if (err) {
671 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
672 				       __func__,
673 				       addr,
674 				       hole_start, hole_end,
675 				       err);
676 				goto err;
677 			}
678 
679 			if (!drm_mm_node_allocated(&vma->node) ||
680 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
681 				pr_err("%s incorrect at %llx + %llx\n",
682 				       __func__, addr, vma->size);
683 				i915_vma_unpin(vma);
684 				err = i915_vma_unbind(vma);
685 				err = -EINVAL;
686 				goto err;
687 			}
688 
689 			i915_vma_unpin(vma);
690 			err = i915_vma_unbind(vma);
691 			GEM_BUG_ON(err);
692 		}
693 
694 		if (igt_timeout(end_time,
695 				"%s timed out after %d/%d\n",
696 				__func__, pot, fls64(hole_end - 1) - 1)) {
697 			err = -EINTR;
698 			goto err;
699 		}
700 	}
701 
702 err:
703 	if (!i915_vma_is_ggtt(vma))
704 		i915_vma_close(vma);
705 err_obj:
706 	i915_gem_object_put(obj);
707 	return err;
708 }
709 
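/*
 * Bind a single object at every slot of the hole in randomised ("drunk")
 * order, checking after each pin that the node landed exactly at the
 * requested fixed offset, and repeating with ever larger objects.
 */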
710 static int drunk_hole(struct drm_i915_private *i915,
711 		      struct i915_address_space *vm,
712 		      u64 hole_start, u64 hole_end,
713 		      unsigned long end_time)
714 {
715 	I915_RND_STATE(prng);
716 	unsigned int size;
717 	unsigned long flags;
718 
719 	flags = PIN_OFFSET_FIXED | PIN_USER;
720 	if (i915_is_ggtt(vm))
721 		flags |= PIN_GLOBAL;
722 
723 	/* Keep creating larger objects until one cannot fit into the hole */
724 	for (size = 12; (hole_end - hole_start) >> size; size++) {
725 		struct drm_i915_gem_object *obj;
726 		unsigned int *order, count, n;
727 		struct i915_vma *vma;
728 		u64 hole_size;
729 		int err = -ENODEV;
730 
731 		hole_size = (hole_end - hole_start) >> size;
732 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
733 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
734 		count = hole_size >> 1;
735 		if (!count) {
736 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
737 				 __func__, hole_start, hole_end, size, hole_size);
738 			break;
739 		}
740 
741 		do {
742 			order = i915_random_order(count, &prng);
743 			if (order)
744 				break;
745 		} while (count >>= 1);
746 		if (!count)
747 			return -ENOMEM;
748 		GEM_BUG_ON(!order);
749 
750 		/* Ignore allocation failures (i.e. don't report them as
751 		 * a test failure) as we are purposefully allocating very
752 		 * large objects without checking that we have sufficient
753 		 * memory. We expect to hit -ENOMEM.
754 		 */
755 
756 		obj = fake_dma_object(i915, BIT_ULL(size));
757 		if (IS_ERR(obj)) {
758 			kfree(order);
759 			break;
760 		}
761 
762 		vma = i915_vma_instance(obj, vm, NULL);
763 		if (IS_ERR(vma)) {
764 			err = PTR_ERR(vma);
765 			goto err_obj;
766 		}
767 
768 		GEM_BUG_ON(vma->size != BIT_ULL(size));
769 
770 		for (n = 0; n < count; n++) {
771 			u64 addr = hole_start + order[n] * BIT_ULL(size);
772 
773 			err = i915_vma_pin(vma, 0, 0, addr | flags);
774 			if (err) {
775 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
776 				       __func__,
777 				       addr, BIT_ULL(size),
778 				       hole_start, hole_end,
779 				       err);
780 				goto err;
781 			}
782 
783 			if (!drm_mm_node_allocated(&vma->node) ||
784 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
785 				pr_err("%s incorrect at %llx + %llx\n",
786 				       __func__, addr, BIT_ULL(size));
787 				i915_vma_unpin(vma);
788 				err = i915_vma_unbind(vma);
789 				err = -EINVAL;
790 				goto err;
791 			}
792 
793 			i915_vma_unpin(vma);
794 			err = i915_vma_unbind(vma);
795 			GEM_BUG_ON(err);
796 
797 			if (igt_timeout(end_time,
798 					"%s timed out after %d/%d\n",
799 					__func__, n, count)) {
800 				err = -EINTR;
801 				goto err;
802 			}
803 		}
804 
805 err:
806 		if (!i915_vma_is_ggtt(vma))
807 			i915_vma_close(vma);
808 err_obj:
809 		i915_gem_object_put(obj);
810 		kfree(order);
811 		if (err)
812 			return err;
813 
814 		cleanup_freed_objects(i915);
815 	}
816 
817 	return 0;
818 }
819 
820 static int __shrink_hole(struct drm_i915_private *i915,
821 			 struct i915_address_space *vm,
822 			 u64 hole_start, u64 hole_end,
823 			 unsigned long end_time)
824 {
825 	struct drm_i915_gem_object *obj;
826 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
827 	unsigned int order = 12;
828 	LIST_HEAD(objects);
829 	int err = 0;
830 	u64 addr;
831 
832 	/* Keep creating larger objects until one cannot fit into the hole */
833 	for (addr = hole_start; addr < hole_end; ) {
834 		struct i915_vma *vma;
835 		u64 size = BIT_ULL(order++);
836 
837 		size = min(size, hole_end - addr);
838 		obj = fake_dma_object(i915, size);
839 		if (IS_ERR(obj)) {
840 			err = PTR_ERR(obj);
841 			break;
842 		}
843 
844 		list_add(&obj->st_link, &objects);
845 
846 		vma = i915_vma_instance(obj, vm, NULL);
847 		if (IS_ERR(vma)) {
848 			err = PTR_ERR(vma);
849 			break;
850 		}
851 
852 		GEM_BUG_ON(vma->size != size);
853 
854 		err = i915_vma_pin(vma, 0, 0, addr | flags);
855 		if (err) {
856 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
857 			       __func__, addr, size, hole_start, hole_end, err);
858 			break;
859 		}
860 
861 		if (!drm_mm_node_allocated(&vma->node) ||
862 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
863 			pr_err("%s incorrect at %llx + %llx\n",
864 			       __func__, addr, size);
865 			i915_vma_unpin(vma);
866 			err = i915_vma_unbind(vma);
867 			err = -EINVAL;
868 			break;
869 		}
870 
871 		i915_vma_unpin(vma);
872 		addr += size;
873 
874 		/*
875 		 * Since we are injecting allocation faults at random intervals,
876 		 * wait for this allocation to complete before we change the
877 		 * fault injection.
878 		 */
879 		err = i915_vma_sync(vma);
880 		if (err)
881 			break;
882 
883 		if (igt_timeout(end_time,
884 				"%s timed out at offset %llx [%llx - %llx]\n",
885 				__func__, addr, hole_start, hole_end)) {
886 			err = -EINTR;
887 			break;
888 		}
889 	}
890 
891 	close_object_list(&objects, vm);
892 	cleanup_freed_objects(i915);
893 	return err;
894 }
895 
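/*
 * Rerun __shrink_hole() while injecting allocation failures into the vm,
 * sweeping the fault-injection interval over the primes so that the error
 * paths of the page-table allocators are exercised at varying points.
 */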
896 static int shrink_hole(struct drm_i915_private *i915,
897 		       struct i915_address_space *vm,
898 		       u64 hole_start, u64 hole_end,
899 		       unsigned long end_time)
900 {
901 	unsigned long prime;
902 	int err;
903 
904 	vm->fault_attr.probability = 999;
905 	atomic_set(&vm->fault_attr.times, -1);
906 
907 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
908 		vm->fault_attr.interval = prime;
909 		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
910 		if (err)
911 			break;
912 	}
913 
914 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
915 
916 	return err;
917 }
918 
919 static int shrink_boom(struct drm_i915_private *i915,
920 		       struct i915_address_space *vm,
921 		       u64 hole_start, u64 hole_end,
922 		       unsigned long end_time)
923 {
924 	unsigned int sizes[] = { SZ_2M, SZ_1G };
925 	struct drm_i915_gem_object *purge;
926 	struct drm_i915_gem_object *explode;
927 	int err;
928 	int i;
929 
930 	/*
931 	 * Catch the case which shrink_hole seems to miss. The setup here
932 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
933 	 * ensuring that all vma associated with the respective pd/pdp are
934 	 * unpinned at the time.
935 	 */
936 
937 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
938 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
939 		unsigned int size = sizes[i];
940 		struct i915_vma *vma;
941 
942 		purge = fake_dma_object(i915, size);
943 		if (IS_ERR(purge))
944 			return PTR_ERR(purge);
945 
946 		vma = i915_vma_instance(purge, vm, NULL);
947 		if (IS_ERR(vma)) {
948 			err = PTR_ERR(vma);
949 			goto err_purge;
950 		}
951 
952 		err = i915_vma_pin(vma, 0, 0, flags);
953 		if (err)
954 			goto err_purge;
955 
956 		/* Should now be ripe for purging */
957 		i915_vma_unpin(vma);
958 
959 		explode = fake_dma_object(i915, size);
960 		if (IS_ERR(explode)) {
961 			err = PTR_ERR(explode);
962 			goto err_purge;
963 		}
964 
965 		vm->fault_attr.probability = 100;
966 		vm->fault_attr.interval = 1;
967 		atomic_set(&vm->fault_attr.times, -1);
968 
969 		vma = i915_vma_instance(explode, vm, NULL);
970 		if (IS_ERR(vma)) {
971 			err = PTR_ERR(vma);
972 			goto err_explode;
973 		}
974 
975 		err = i915_vma_pin(vma, 0, 0, flags | size);
976 		if (err)
977 			goto err_explode;
978 
979 		i915_vma_unpin(vma);
980 
981 		i915_gem_object_put(purge);
982 		i915_gem_object_put(explode);
983 
984 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
985 		cleanup_freed_objects(i915);
986 	}
987 
988 	return 0;
989 
990 err_explode:
991 	i915_gem_object_put(explode);
992 err_purge:
993 	i915_gem_object_put(purge);
994 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
995 	return err;
996 }
997 
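/*
 * Run the given hole exerciser over a freshly created full ppGTT, covering
 * its entire address range within the IGT timeout.
 */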
998 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
999 			  int (*func)(struct drm_i915_private *i915,
1000 				      struct i915_address_space *vm,
1001 				      u64 hole_start, u64 hole_end,
1002 				      unsigned long end_time))
1003 {
1004 	struct drm_file *file;
1005 	struct i915_ppgtt *ppgtt;
1006 	IGT_TIMEOUT(end_time);
1007 	int err;
1008 
1009 	if (!HAS_FULL_PPGTT(dev_priv))
1010 		return 0;
1011 
1012 	file = mock_file(dev_priv);
1013 	if (IS_ERR(file))
1014 		return PTR_ERR(file);
1015 
1016 	ppgtt = i915_ppgtt_create(dev_priv);
1017 	if (IS_ERR(ppgtt)) {
1018 		err = PTR_ERR(ppgtt);
1019 		goto out_free;
1020 	}
1021 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1022 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1023 
1024 	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
1025 
1026 	i915_vm_put(&ppgtt->vm);
1027 
1028 out_free:
1029 	mock_file_free(dev_priv, file);
1030 	return err;
1031 }
1032 
1033 static int igt_ppgtt_fill(void *arg)
1034 {
1035 	return exercise_ppgtt(arg, fill_hole);
1036 }
1037 
1038 static int igt_ppgtt_walk(void *arg)
1039 {
1040 	return exercise_ppgtt(arg, walk_hole);
1041 }
1042 
1043 static int igt_ppgtt_pot(void *arg)
1044 {
1045 	return exercise_ppgtt(arg, pot_hole);
1046 }
1047 
1048 static int igt_ppgtt_drunk(void *arg)
1049 {
1050 	return exercise_ppgtt(arg, drunk_hole);
1051 }
1052 
1053 static int igt_ppgtt_lowlevel(void *arg)
1054 {
1055 	return exercise_ppgtt(arg, lowlevel_hole);
1056 }
1057 
1058 static int igt_ppgtt_shrink(void *arg)
1059 {
1060 	return exercise_ppgtt(arg, shrink_hole);
1061 }
1062 
1063 static int igt_ppgtt_shrink_boom(void *arg)
1064 {
1065 	return exercise_ppgtt(arg, shrink_boom);
1066 }
1067 
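/* list_sort() comparator: order GGTT holes by ascending start address. */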
1068 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1069 {
1070 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1071 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1072 
1073 	if (a->start < b->start)
1074 		return -1;
1075 	else
1076 		return 1;
1077 }
1078 
1079 static int exercise_ggtt(struct drm_i915_private *i915,
1080 			 int (*func)(struct drm_i915_private *i915,
1081 				     struct i915_address_space *vm,
1082 				     u64 hole_start, u64 hole_end,
1083 				     unsigned long end_time))
1084 {
1085 	struct i915_ggtt *ggtt = &i915->ggtt;
1086 	u64 hole_start, hole_end, last = 0;
1087 	struct drm_mm_node *node;
1088 	IGT_TIMEOUT(end_time);
1089 	int err = 0;
1090 
1091 restart:
1092 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1093 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1094 		if (hole_start < last)
1095 			continue;
1096 
1097 		if (ggtt->vm.mm.color_adjust)
1098 			ggtt->vm.mm.color_adjust(node, 0,
1099 						 &hole_start, &hole_end);
1100 		if (hole_start >= hole_end)
1101 			continue;
1102 
1103 		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
1104 		if (err)
1105 			break;
1106 
1107 		/* As we have manipulated the drm_mm, the hole list may be corrupt */
1108 		last = hole_end;
1109 		goto restart;
1110 	}
1111 
1112 	return err;
1113 }
1114 
1115 static int igt_ggtt_fill(void *arg)
1116 {
1117 	return exercise_ggtt(arg, fill_hole);
1118 }
1119 
1120 static int igt_ggtt_walk(void *arg)
1121 {
1122 	return exercise_ggtt(arg, walk_hole);
1123 }
1124 
1125 static int igt_ggtt_pot(void *arg)
1126 {
1127 	return exercise_ggtt(arg, pot_hole);
1128 }
1129 
1130 static int igt_ggtt_drunk(void *arg)
1131 {
1132 	return exercise_ggtt(arg, drunk_hole);
1133 }
1134 
1135 static int igt_ggtt_lowlevel(void *arg)
1136 {
1137 	return exercise_ggtt(arg, lowlevel_hole);
1138 }
1139 
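/*
 * Sanity check ggtt->vm.insert_page() by pointing a range of GGTT PTEs in
 * the mappable aperture at a single page, writing a distinct dword through
 * each PTE in random order and reading the values back for verification.
 */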
1140 static int igt_ggtt_page(void *arg)
1141 {
1142 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1143 	I915_RND_STATE(prng);
1144 	struct drm_i915_private *i915 = arg;
1145 	struct i915_ggtt *ggtt = &i915->ggtt;
1146 	struct drm_i915_gem_object *obj;
1147 	intel_wakeref_t wakeref;
1148 	struct drm_mm_node tmp;
1149 	unsigned int *order, n;
1150 	int err;
1151 
1152 	if (!i915_ggtt_has_aperture(ggtt))
1153 		return 0;
1154 
1155 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1156 	if (IS_ERR(obj))
1157 		return PTR_ERR(obj);
1158 
1159 	err = i915_gem_object_pin_pages(obj);
1160 	if (err)
1161 		goto out_free;
1162 
1163 	memset(&tmp, 0, sizeof(tmp));
1164 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1165 					  count * PAGE_SIZE, 0,
1166 					  I915_COLOR_UNEVICTABLE,
1167 					  0, ggtt->mappable_end,
1168 					  DRM_MM_INSERT_LOW);
1169 	if (err)
1170 		goto out_unpin;
1171 
1172 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1173 
1174 	for (n = 0; n < count; n++) {
1175 		u64 offset = tmp.start + n * PAGE_SIZE;
1176 
1177 		ggtt->vm.insert_page(&ggtt->vm,
1178 				     i915_gem_object_get_dma_address(obj, 0),
1179 				     offset, I915_CACHE_NONE, 0);
1180 	}
1181 
1182 	order = i915_random_order(count, &prng);
1183 	if (!order) {
1184 		err = -ENOMEM;
1185 		goto out_remove;
1186 	}
1187 
1188 	for (n = 0; n < count; n++) {
1189 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1190 		u32 __iomem *vaddr;
1191 
1192 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1193 		iowrite32(n, vaddr + n);
1194 		io_mapping_unmap_atomic(vaddr);
1195 	}
1196 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1197 
1198 	i915_random_reorder(order, count, &prng);
1199 	for (n = 0; n < count; n++) {
1200 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1201 		u32 __iomem *vaddr;
1202 		u32 val;
1203 
1204 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1205 		val = ioread32(vaddr + n);
1206 		io_mapping_unmap_atomic(vaddr);
1207 
1208 		if (val != n) {
1209 			pr_err("insert page failed: found %d, expected %d\n",
1210 			       val, n);
1211 			err = -EINVAL;
1212 			break;
1213 		}
1214 	}
1215 
1216 	kfree(order);
1217 out_remove:
1218 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1219 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1220 	drm_mm_remove_node(&tmp);
1221 out_unpin:
1222 	i915_gem_object_unpin_pages(obj);
1223 out_free:
1224 	i915_gem_object_put(obj);
1225 	return err;
1226 }
1227 
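/*
 * Mark a mock vma as bound by hand: take the page references and add it to
 * the vm's bound_list so that eviction can find it, without going through
 * i915_vma_pin() and the real binding paths.
 */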
1228 static void track_vma_bind(struct i915_vma *vma)
1229 {
1230 	struct drm_i915_gem_object *obj = vma->obj;
1231 
1232 	atomic_inc(&obj->bind_count); /* track for eviction later */
1233 	__i915_gem_object_pin_pages(obj);
1234 
1235 	GEM_BUG_ON(vma->pages);
1236 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1237 	__i915_gem_object_pin_pages(obj);
1238 	vma->pages = obj->mm.pages;
1239 
1240 	mutex_lock(&vma->vm->mutex);
1241 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1242 	mutex_unlock(&vma->vm->mutex);
1243 }
1244 
1245 static int exercise_mock(struct drm_i915_private *i915,
1246 			 int (*func)(struct drm_i915_private *i915,
1247 				     struct i915_address_space *vm,
1248 				     u64 hole_start, u64 hole_end,
1249 				     unsigned long end_time))
1250 {
1251 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1252 	struct i915_address_space *vm;
1253 	struct i915_gem_context *ctx;
1254 	IGT_TIMEOUT(end_time);
1255 	int err;
1256 
1257 	ctx = mock_context(i915, "mock");
1258 	if (!ctx)
1259 		return -ENOMEM;
1260 
1261 	vm = i915_gem_context_get_vm_rcu(ctx);
1262 	err = func(i915, vm, 0, min(vm->total, limit), end_time);
1263 	i915_vm_put(vm);
1264 
1265 	mock_context_close(ctx);
1266 	return err;
1267 }
1268 
1269 static int igt_mock_fill(void *arg)
1270 {
1271 	struct i915_ggtt *ggtt = arg;
1272 
1273 	return exercise_mock(ggtt->vm.i915, fill_hole);
1274 }
1275 
1276 static int igt_mock_walk(void *arg)
1277 {
1278 	struct i915_ggtt *ggtt = arg;
1279 
1280 	return exercise_mock(ggtt->vm.i915, walk_hole);
1281 }
1282 
1283 static int igt_mock_pot(void *arg)
1284 {
1285 	struct i915_ggtt *ggtt = arg;
1286 
1287 	return exercise_mock(ggtt->vm.i915, pot_hole);
1288 }
1289 
1290 static int igt_mock_drunk(void *arg)
1291 {
1292 	struct i915_ggtt *ggtt = arg;
1293 
1294 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1295 }
1296 
1297 static int igt_gtt_reserve(void *arg)
1298 {
1299 	struct i915_ggtt *ggtt = arg;
1300 	struct drm_i915_gem_object *obj, *on;
1301 	I915_RND_STATE(prng);
1302 	LIST_HEAD(objects);
1303 	u64 total;
1304 	int err = -ENODEV;
1305 
1306 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1307 	 * for the node, and evicts if it has to. So our test checks that
1308 	 * it can give us the requested space and prevent overlaps.
1309 	 */
1310 
1311 	/* Start by filling the GGTT */
1312 	for (total = 0;
1313 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1314 	     total += 2 * I915_GTT_PAGE_SIZE) {
1315 		struct i915_vma *vma;
1316 
1317 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1318 						      2 * PAGE_SIZE);
1319 		if (IS_ERR(obj)) {
1320 			err = PTR_ERR(obj);
1321 			goto out;
1322 		}
1323 
1324 		err = i915_gem_object_pin_pages(obj);
1325 		if (err) {
1326 			i915_gem_object_put(obj);
1327 			goto out;
1328 		}
1329 
1330 		list_add(&obj->st_link, &objects);
1331 
1332 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1333 		if (IS_ERR(vma)) {
1334 			err = PTR_ERR(vma);
1335 			goto out;
1336 		}
1337 
1338 		mutex_lock(&ggtt->vm.mutex);
1339 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1340 					   obj->base.size,
1341 					   total,
1342 					   obj->cache_level,
1343 					   0);
1344 		mutex_unlock(&ggtt->vm.mutex);
1345 		if (err) {
1346 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1347 			       total, ggtt->vm.total, err);
1348 			goto out;
1349 		}
1350 		track_vma_bind(vma);
1351 
1352 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1353 		if (vma->node.start != total ||
1354 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1355 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1356 			       vma->node.start, vma->node.size,
1357 			       total, 2*I915_GTT_PAGE_SIZE);
1358 			err = -EINVAL;
1359 			goto out;
1360 		}
1361 	}
1362 
1363 	/* Now we start forcing evictions */
1364 	for (total = I915_GTT_PAGE_SIZE;
1365 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1366 	     total += 2 * I915_GTT_PAGE_SIZE) {
1367 		struct i915_vma *vma;
1368 
1369 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1370 						      2 * PAGE_SIZE);
1371 		if (IS_ERR(obj)) {
1372 			err = PTR_ERR(obj);
1373 			goto out;
1374 		}
1375 
1376 		err = i915_gem_object_pin_pages(obj);
1377 		if (err) {
1378 			i915_gem_object_put(obj);
1379 			goto out;
1380 		}
1381 
1382 		list_add(&obj->st_link, &objects);
1383 
1384 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1385 		if (IS_ERR(vma)) {
1386 			err = PTR_ERR(vma);
1387 			goto out;
1388 		}
1389 
1390 		mutex_lock(&ggtt->vm.mutex);
1391 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1392 					   obj->base.size,
1393 					   total,
1394 					   obj->cache_level,
1395 					   0);
1396 		mutex_unlock(&ggtt->vm.mutex);
1397 		if (err) {
1398 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1399 			       total, ggtt->vm.total, err);
1400 			goto out;
1401 		}
1402 		track_vma_bind(vma);
1403 
1404 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1405 		if (vma->node.start != total ||
1406 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1407 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1408 			       vma->node.start, vma->node.size,
1409 			       total, 2*I915_GTT_PAGE_SIZE);
1410 			err = -EINVAL;
1411 			goto out;
1412 		}
1413 	}
1414 
1415 	/* And then try at random */
1416 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1417 		struct i915_vma *vma;
1418 		u64 offset;
1419 
1420 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1421 		if (IS_ERR(vma)) {
1422 			err = PTR_ERR(vma);
1423 			goto out;
1424 		}
1425 
1426 		err = i915_vma_unbind(vma);
1427 		if (err) {
1428 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1429 			goto out;
1430 		}
1431 
1432 		offset = igt_random_offset(&prng,
1433 					   0, ggtt->vm.total,
1434 					   2 * I915_GTT_PAGE_SIZE,
1435 					   I915_GTT_MIN_ALIGNMENT);
1436 
1437 		mutex_lock(&ggtt->vm.mutex);
1438 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1439 					   obj->base.size,
1440 					   offset,
1441 					   obj->cache_level,
1442 					   0);
1443 		mutex_unlock(&ggtt->vm.mutex);
1444 		if (err) {
1445 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1446 			       total, ggtt->vm.total, err);
1447 			goto out;
1448 		}
1449 		track_vma_bind(vma);
1450 
1451 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1452 		if (vma->node.start != offset ||
1453 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1454 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1455 			       vma->node.start, vma->node.size,
1456 			       offset, 2*I915_GTT_PAGE_SIZE);
1457 			err = -EINVAL;
1458 			goto out;
1459 		}
1460 	}
1461 
1462 out:
1463 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1464 		i915_gem_object_unpin_pages(obj);
1465 		i915_gem_object_put(obj);
1466 	}
1467 	return err;
1468 }
1469 
1470 static int igt_gtt_insert(void *arg)
1471 {
1472 	struct i915_ggtt *ggtt = arg;
1473 	struct drm_i915_gem_object *obj, *on;
1474 	struct drm_mm_node tmp = {};
1475 	const struct invalid_insert {
1476 		u64 size;
1477 		u64 alignment;
1478 		u64 start, end;
1479 	} invalid_insert[] = {
1480 		{
1481 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1482 			0, ggtt->vm.total,
1483 		},
1484 		{
1485 			2*I915_GTT_PAGE_SIZE, 0,
1486 			0, I915_GTT_PAGE_SIZE,
1487 		},
1488 		{
1489 			-(u64)I915_GTT_PAGE_SIZE, 0,
1490 			0, 4*I915_GTT_PAGE_SIZE,
1491 		},
1492 		{
1493 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1494 			0, 4*I915_GTT_PAGE_SIZE,
1495 		},
1496 		{
1497 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1498 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1499 		},
1500 		{}
1501 	}, *ii;
1502 	LIST_HEAD(objects);
1503 	u64 total;
1504 	int err = -ENODEV;
1505 
1506 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1507 	 * to the node, evicting if required.
1508 	 */
1509 
1510 	/* Check a couple of obviously invalid requests */
1511 	for (ii = invalid_insert; ii->size; ii++) {
1512 		mutex_lock(&ggtt->vm.mutex);
1513 		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1514 					  ii->size, ii->alignment,
1515 					  I915_COLOR_UNEVICTABLE,
1516 					  ii->start, ii->end,
1517 					  0);
1518 		mutex_unlock(&ggtt->vm.mutex);
1519 		if (err != -ENOSPC) {
1520 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1521 			       ii->size, ii->alignment, ii->start, ii->end,
1522 			       err);
1523 			return -EINVAL;
1524 		}
1525 	}
1526 
1527 	/* Start by filling the GGTT */
1528 	for (total = 0;
1529 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1530 	     total += I915_GTT_PAGE_SIZE) {
1531 		struct i915_vma *vma;
1532 
1533 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1534 						      I915_GTT_PAGE_SIZE);
1535 		if (IS_ERR(obj)) {
1536 			err = PTR_ERR(obj);
1537 			goto out;
1538 		}
1539 
1540 		err = i915_gem_object_pin_pages(obj);
1541 		if (err) {
1542 			i915_gem_object_put(obj);
1543 			goto out;
1544 		}
1545 
1546 		list_add(&obj->st_link, &objects);
1547 
1548 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1549 		if (IS_ERR(vma)) {
1550 			err = PTR_ERR(vma);
1551 			goto out;
1552 		}
1553 
1554 		mutex_lock(&ggtt->vm.mutex);
1555 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1556 					  obj->base.size, 0, obj->cache_level,
1557 					  0, ggtt->vm.total,
1558 					  0);
1559 		mutex_unlock(&ggtt->vm.mutex);
1560 		if (err == -ENOSPC) {
1561 			/* maxed out the GGTT space */
1562 			i915_gem_object_put(obj);
1563 			break;
1564 		}
1565 		if (err) {
1566 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1567 			       total, ggtt->vm.total, err);
1568 			goto out;
1569 		}
1570 		track_vma_bind(vma);
1571 		__i915_vma_pin(vma);
1572 
1573 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1574 	}
1575 
1576 	list_for_each_entry(obj, &objects, st_link) {
1577 		struct i915_vma *vma;
1578 
1579 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1580 		if (IS_ERR(vma)) {
1581 			err = PTR_ERR(vma);
1582 			goto out;
1583 		}
1584 
1585 		if (!drm_mm_node_allocated(&vma->node)) {
1586 			pr_err("VMA was unexpectedly evicted!\n");
1587 			err = -EINVAL;
1588 			goto out;
1589 		}
1590 
1591 		__i915_vma_unpin(vma);
1592 	}
1593 
1594 	/* If we then reinsert, we should find the same hole */
1595 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1596 		struct i915_vma *vma;
1597 		u64 offset;
1598 
1599 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1600 		if (IS_ERR(vma)) {
1601 			err = PTR_ERR(vma);
1602 			goto out;
1603 		}
1604 
1605 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1606 		offset = vma->node.start;
1607 
1608 		err = i915_vma_unbind(vma);
1609 		if (err) {
1610 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1611 			goto out;
1612 		}
1613 
1614 		mutex_lock(&ggtt->vm.mutex);
1615 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1616 					  obj->base.size, 0, obj->cache_level,
1617 					  0, ggtt->vm.total,
1618 					  0);
1619 		mutex_unlock(&ggtt->vm.mutex);
1620 		if (err) {
1621 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1622 			       total, ggtt->vm.total, err);
1623 			goto out;
1624 		}
1625 		track_vma_bind(vma);
1626 
1627 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1628 		if (vma->node.start != offset) {
1629 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1630 			       offset, vma->node.start);
1631 			err = -EINVAL;
1632 			goto out;
1633 		}
1634 	}
1635 
1636 	/* And then force evictions */
1637 	for (total = 0;
1638 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1639 	     total += 2 * I915_GTT_PAGE_SIZE) {
1640 		struct i915_vma *vma;
1641 
1642 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1643 						      2 * I915_GTT_PAGE_SIZE);
1644 		if (IS_ERR(obj)) {
1645 			err = PTR_ERR(obj);
1646 			goto out;
1647 		}
1648 
1649 		err = i915_gem_object_pin_pages(obj);
1650 		if (err) {
1651 			i915_gem_object_put(obj);
1652 			goto out;
1653 		}
1654 
1655 		list_add(&obj->st_link, &objects);
1656 
1657 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1658 		if (IS_ERR(vma)) {
1659 			err = PTR_ERR(vma);
1660 			goto out;
1661 		}
1662 
1663 		mutex_lock(&ggtt->vm.mutex);
1664 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1665 					  obj->base.size, 0, obj->cache_level,
1666 					  0, ggtt->vm.total,
1667 					  0);
1668 		mutex_unlock(&ggtt->vm.mutex);
1669 		if (err) {
1670 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1671 			       total, ggtt->vm.total, err);
1672 			goto out;
1673 		}
1674 		track_vma_bind(vma);
1675 
1676 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1677 	}
1678 
1679 out:
1680 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1681 		i915_gem_object_unpin_pages(obj);
1682 		i915_gem_object_put(obj);
1683 	}
1684 	return err;
1685 }
1686 
1687 int i915_gem_gtt_mock_selftests(void)
1688 {
1689 	static const struct i915_subtest tests[] = {
1690 		SUBTEST(igt_mock_drunk),
1691 		SUBTEST(igt_mock_walk),
1692 		SUBTEST(igt_mock_pot),
1693 		SUBTEST(igt_mock_fill),
1694 		SUBTEST(igt_gtt_reserve),
1695 		SUBTEST(igt_gtt_insert),
1696 	};
1697 	struct drm_i915_private *i915;
1698 	struct i915_ggtt *ggtt;
1699 	int err;
1700 
1701 	i915 = mock_gem_device();
1702 	if (!i915)
1703 		return -ENOMEM;
1704 
1705 	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1706 	if (!ggtt) {
1707 		err = -ENOMEM;
1708 		goto out_put;
1709 	}
1710 	mock_init_ggtt(i915, ggtt);
1711 
1712 	err = i915_subtests(tests, ggtt);
1713 
1714 	mock_device_flush(i915);
1715 	i915_gem_drain_freed_objects(i915);
1716 	mock_fini_ggtt(ggtt);
1717 	kfree(ggtt);
1718 out_put:
1719 	drm_dev_put(&i915->drm);
1720 	return err;
1721 }
1722 
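/*
 * Submit an empty request on the context and wait briefly (HZ / 5) for it
 * to complete, returning -EIO on timeout.
 */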
1723 static int context_sync(struct intel_context *ce)
1724 {
1725 	struct i915_request *rq;
1726 	long timeout;
1727 
1728 	rq = intel_context_create_request(ce);
1729 	if (IS_ERR(rq))
1730 		return PTR_ERR(rq);
1731 
1732 	i915_request_get(rq);
1733 	i915_request_add(rq);
1734 
1735 	timeout = i915_request_wait(rq, 0, HZ / 5);
1736 	i915_request_put(rq);
1737 
1738 	return timeout < 0 ? -EIO : 0;
1739 }
1740 
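/*
 * Emit a request that jumps into the batch at the given address (preceded by
 * an init breadcrumb where available so hangs can be detected), returning the
 * request with a reference held, or an ERR_PTR on failure.
 */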
1741 static struct i915_request *
1742 submit_batch(struct intel_context *ce, u64 addr)
1743 {
1744 	struct i915_request *rq;
1745 	int err;
1746 
1747 	rq = intel_context_create_request(ce);
1748 	if (IS_ERR(rq))
1749 		return rq;
1750 
1751 	err = 0;
1752 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1753 		err = rq->engine->emit_init_breadcrumb(rq);
1754 	if (err == 0)
1755 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1756 
1757 	if (err == 0)
1758 		i915_request_get(rq);
1759 	i915_request_add(rq);
1760 
1761 	return err ? ERR_PTR(err) : rq;
1762 }
1763 
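/*
 * Each 64-byte slot of the "act" batch ends with a chained
 * MI_BATCH_BUFFER_START; spinner() points at the MI_NOOP just before it,
 * which end_spin() overwrites with MI_BATCH_BUFFER_END so that slot's batch
 * terminates instead of chaining onwards.
 */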
1764 static u32 *spinner(u32 *batch, int i)
1765 {
1766 	return batch + i * 64 / sizeof(*batch) + 4;
1767 }
1768 
1769 static void end_spin(u32 *batch, int i)
1770 {
1771 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1772 	wmb();
1773 }
1774 
1775 static int igt_cs_tlb(void *arg)
1776 {
1777 	const unsigned int count = PAGE_SIZE / 64;
1778 	const unsigned int chunk_size = count * PAGE_SIZE;
1779 	struct drm_i915_private *i915 = arg;
1780 	struct drm_i915_gem_object *bbe, *act, *out;
1781 	struct i915_gem_engines_iter it;
1782 	struct i915_address_space *vm;
1783 	struct i915_gem_context *ctx;
1784 	struct intel_context *ce;
1785 	struct drm_file *file;
1786 	struct i915_vma *vma;
1787 	I915_RND_STATE(prng);
1788 	unsigned int i;
1789 	u32 *result;
1790 	u32 *batch;
1791 	int err = 0;
1792 
1793 	/*
1794 	 * Our mission here is to fool the hardware into executing something
1795 	 * from scratch as it has not seen the batch move (due to missing
1796 	 * the TLB invalidate).
1797 	 */
1798 
1799 	file = mock_file(i915);
1800 	if (IS_ERR(file))
1801 		return PTR_ERR(file);
1802 
1803 	ctx = live_context(i915, file);
1804 	if (IS_ERR(ctx)) {
1805 		err = PTR_ERR(ctx);
1806 		goto out_unlock;
1807 	}
1808 
1809 	vm = i915_gem_context_get_vm_rcu(ctx);
1810 	if (i915_is_ggtt(vm))
1811 		goto out_vm;
1812 
1813 	/* Create two pages: a dummy with which we prefill the TLB, and the intended target */
1814 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1815 	if (IS_ERR(bbe)) {
1816 		err = PTR_ERR(bbe);
1817 		goto out_vm;
1818 	}
1819 
1820 	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1821 	if (IS_ERR(batch)) {
1822 		err = PTR_ERR(batch);
1823 		goto out_put_bbe;
1824 	}
1825 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1826 	i915_gem_object_flush_map(bbe);
1827 	i915_gem_object_unpin_map(bbe);
1828 
1829 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1830 	if (IS_ERR(act)) {
1831 		err = PTR_ERR(act);
1832 		goto out_put_bbe;
1833 	}
1834 
1835 	/* Track the execution of each request by writing into a different slot */
1836 	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1837 	if (IS_ERR(batch)) {
1838 		err = PTR_ERR(batch);
1839 		goto out_put_act;
1840 	}
1841 	for (i = 0; i < count; i++) {
1842 		u32 *cs = batch + i * 64 / sizeof(*cs);
1843 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1844 
1845 		GEM_BUG_ON(INTEL_GEN(i915) < 6);
1846 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
1847 		if (INTEL_GEN(i915) >= 8) {
1848 			cs[1] = lower_32_bits(addr);
1849 			cs[2] = upper_32_bits(addr);
1850 			cs[3] = i;
1851 			cs[4] = MI_NOOP;
1852 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
1853 		} else {
1854 			cs[1] = 0;
1855 			cs[2] = lower_32_bits(addr);
1856 			cs[3] = i;
1857 			cs[4] = MI_NOOP;
1858 			cs[5] = MI_BATCH_BUFFER_START;
1859 		}
1860 	}
1861 
1862 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1863 	if (IS_ERR(out)) {
1864 		err = PTR_ERR(out);
1865 		goto out_put_batch;
1866 	}
1867 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1868 
1869 	vma = i915_vma_instance(out, vm, NULL);
1870 	if (IS_ERR(vma)) {
1871 		err = PTR_ERR(vma);
1872 		goto out_put_batch;
1873 	}
1874 
1875 	err = i915_vma_pin(vma, 0, 0,
1876 			   PIN_USER |
1877 			   PIN_OFFSET_FIXED |
1878 			   (vm->total - PAGE_SIZE));
1879 	if (err)
1880 		goto out_put_out;
1881 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1882 
1883 	result = i915_gem_object_pin_map(out, I915_MAP_WB);
1884 	if (IS_ERR(result)) {
1885 		err = PTR_ERR(result);
1886 		goto out_put_out;
1887 	}
1888 
1889 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1890 		IGT_TIMEOUT(end_time);
1891 		unsigned long pass = 0;
1892 
1893 		if (!intel_engine_can_store_dword(ce->engine))
1894 			continue;
1895 
1896 		while (!__igt_timeout(end_time, NULL)) {
1897 			struct i915_request *rq;
1898 			u64 offset;
1899 
1900 			offset = igt_random_offset(&prng,
1901 						   0, vm->total - PAGE_SIZE,
1902 						   chunk_size, PAGE_SIZE);
1903 
1904 			err = vm->allocate_va_range(vm, offset, chunk_size);
1905 			if (err)
1906 				goto end;
1907 
1908 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1909 
1910 			vma = i915_vma_instance(bbe, vm, NULL);
1911 			if (IS_ERR(vma)) {
1912 				err = PTR_ERR(vma);
1913 				goto end;
1914 			}
1915 
1916 			err = vma->ops->set_pages(vma);
1917 			if (err)
1918 				goto end;
1919 
1920 			/* Prime the TLB with the dummy pages */
1921 			for (i = 0; i < count; i++) {
1922 				vma->node.start = offset + i * PAGE_SIZE;
1923 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1924 
1925 				rq = submit_batch(ce, vma->node.start);
1926 				if (IS_ERR(rq)) {
1927 					err = PTR_ERR(rq);
1928 					goto end;
1929 				}
1930 				i915_request_put(rq);
1931 			}
1932 
1933 			vma->ops->clear_pages(vma);
1934 
1935 			err = context_sync(ce);
1936 			if (err) {
1937 				pr_err("%s: dummy setup timed out\n",
1938 				       ce->engine->name);
1939 				goto end;
1940 			}
1941 
1942 			vma = i915_vma_instance(act, vm, NULL);
1943 			if (IS_ERR(vma)) {
1944 				err = PTR_ERR(vma);
1945 				goto end;
1946 			}
1947 
1948 			err = vma->ops->set_pages(vma);
1949 			if (err)
1950 				goto end;
1951 
1952 			/* Replace the TLB with target batches */
1953 			for (i = 0; i < count; i++) {
1954 				struct i915_request *rq;
1955 				u32 *cs = batch + i * 64 / sizeof(*cs);
1956 				u64 addr;
1957 
1958 				vma->node.start = offset + i * PAGE_SIZE;
1959 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1960 
1961 				addr = vma->node.start + i * 64;
1962 				cs[4] = MI_NOOP;
1963 				cs[6] = lower_32_bits(addr);
1964 				cs[7] = upper_32_bits(addr);
1965 				wmb();
1966 
1967 				rq = submit_batch(ce, addr);
1968 				if (IS_ERR(rq)) {
1969 					err = PTR_ERR(rq);
1970 					goto end;
1971 				}
1972 
1973 				/* Wait until the context chain has started */
1974 				if (i == 0) {
1975 					while (READ_ONCE(result[i]) &&
1976 					       !i915_request_completed(rq))
1977 						cond_resched();
1978 				} else {
1979 					end_spin(batch, i - 1);
1980 				}
1981 
1982 				i915_request_put(rq);
1983 			}
1984 			end_spin(batch, count - 1);
1985 
1986 			vma->ops->clear_pages(vma);
1987 
1988 			err = context_sync(ce);
1989 			if (err) {
1990 				pr_err("%s: writes timed out\n",
1991 				       ce->engine->name);
1992 				goto end;
1993 			}
1994 
1995 			for (i = 0; i < count; i++) {
1996 				if (result[i] != i) {
1997 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
1998 					       ce->engine->name, pass,
1999 					       offset, i, result[i], i);
2000 					err = -EINVAL;
2001 					goto end;
2002 				}
2003 			}
2004 
2005 			vm->clear_range(vm, offset, chunk_size);
2006 			pass++;
2007 		}
2008 	}
2009 end:
2010 	if (igt_flush_test(i915))
2011 		err = -EIO;
2012 	i915_gem_context_unlock_engines(ctx);
2013 	i915_gem_object_unpin_map(out);
2014 out_put_out:
2015 	i915_gem_object_put(out);
2016 out_put_batch:
2017 	i915_gem_object_unpin_map(act);
2018 out_put_act:
2019 	i915_gem_object_put(act);
2020 out_put_bbe:
2021 	i915_gem_object_put(bbe);
2022 out_vm:
2023 	i915_vm_put(vm);
2024 out_unlock:
2025 	mock_file_free(i915, file);
2026 	return err;
2027 }
2028 
2029 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2030 {
2031 	static const struct i915_subtest tests[] = {
2032 		SUBTEST(igt_ppgtt_alloc),
2033 		SUBTEST(igt_ppgtt_lowlevel),
2034 		SUBTEST(igt_ppgtt_drunk),
2035 		SUBTEST(igt_ppgtt_walk),
2036 		SUBTEST(igt_ppgtt_pot),
2037 		SUBTEST(igt_ppgtt_fill),
2038 		SUBTEST(igt_ppgtt_shrink),
2039 		SUBTEST(igt_ppgtt_shrink_boom),
2040 		SUBTEST(igt_ggtt_lowlevel),
2041 		SUBTEST(igt_ggtt_drunk),
2042 		SUBTEST(igt_ggtt_walk),
2043 		SUBTEST(igt_ggtt_pot),
2044 		SUBTEST(igt_ggtt_fill),
2045 		SUBTEST(igt_ggtt_page),
2046 		SUBTEST(igt_cs_tlb),
2047 	};
2048 
2049 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2050 
2051 	return i915_subtests(tests, i915);
2052 }
2053