xref: /openbmc/linux/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c (revision 4464005a12b5c79e1a364e6272ee10a83413f928)
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 
32 #include "i915_random.h"
33 #include "i915_selftest.h"
34 
35 #include "mock_drm.h"
36 #include "mock_gem_device.h"
37 #include "mock_gtt.h"
38 #include "igt_flush_test.h"
39 
40 static void cleanup_freed_objects(struct drm_i915_private *i915)
41 {
42 	i915_gem_drain_freed_objects(i915);
43 }
44 
45 static void fake_free_pages(struct drm_i915_gem_object *obj,
46 			    struct sg_table *pages)
47 {
48 	sg_free_table(pages);
49 	kfree(pages);
50 }
51 
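/*
 * Provide "backing storage" for the fake objects without allocating any
 * real pages: build an sg_table whose entries all point at a single bogus,
 * constant PFN, so that arbitrarily large objects can be constructed and
 * bound without exhausting system memory.
 */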
52 static int fake_get_pages(struct drm_i915_gem_object *obj)
53 {
54 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
55 #define PFN_BIAS 0x1000
56 	struct sg_table *pages;
57 	struct scatterlist *sg;
58 	unsigned int sg_page_sizes;
59 	typeof(obj->base.size) rem;
60 
61 	pages = kmalloc(sizeof(*pages), GFP);
62 	if (!pages)
63 		return -ENOMEM;
64 
65 	rem = round_up(obj->base.size, BIT(31)) >> 31;
66 	if (sg_alloc_table(pages, rem, GFP)) {
67 		kfree(pages);
68 		return -ENOMEM;
69 	}
70 
71 	sg_page_sizes = 0;
72 	rem = obj->base.size;
73 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
74 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
75 
76 		GEM_BUG_ON(!len);
77 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
78 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
79 		sg_dma_len(sg) = len;
80 		sg_page_sizes |= len;
81 
82 		rem -= len;
83 	}
84 	GEM_BUG_ON(rem);
85 
86 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
87 
88 	return 0;
89 #undef GFP
90 }
91 
92 static void fake_put_pages(struct drm_i915_gem_object *obj,
93 			   struct sg_table *pages)
94 {
95 	fake_free_pages(obj, pages);
96 	obj->mm.dirty = false;
97 }
98 
99 static const struct drm_i915_gem_object_ops fake_ops = {
100 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
101 	.get_pages = fake_get_pages,
102 	.put_pages = fake_put_pages,
103 };
104 
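/*
 * Create a GEM object of the requested size that uses the fake backing
 * store above, allowing GTT binding to be exercised for sizes far larger
 * than the memory actually available.
 */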
105 static struct drm_i915_gem_object *
106 fake_dma_object(struct drm_i915_private *i915, u64 size)
107 {
108 	static struct lock_class_key lock_class;
109 	struct drm_i915_gem_object *obj;
110 
111 	GEM_BUG_ON(!size);
112 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
113 
114 	if (overflows_type(size, obj->base.size))
115 		return ERR_PTR(-E2BIG);
116 
117 	obj = i915_gem_object_alloc();
118 	if (!obj)
119 		goto err;
120 
121 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
122 	i915_gem_object_init(obj, &fake_ops, &lock_class);
123 
124 	i915_gem_object_set_volatile(obj);
125 
126 	obj->write_domain = I915_GEM_DOMAIN_CPU;
127 	obj->read_domains = I915_GEM_DOMAIN_CPU;
128 	obj->cache_level = I915_CACHE_NONE;
129 
130 	/* Preallocate the "backing storage" */
131 	if (i915_gem_object_pin_pages(obj))
132 		goto err_obj;
133 
134 	i915_gem_object_unpin_pages(obj);
135 	return obj;
136 
137 err_obj:
138 	i915_gem_object_put(obj);
139 err:
140 	return ERR_PTR(-ENOMEM);
141 }
142 
143 static int igt_ppgtt_alloc(void *arg)
144 {
145 	struct drm_i915_private *dev_priv = arg;
146 	struct i915_ppgtt *ppgtt;
147 	u64 size, last, limit;
148 	int err = 0;
149 
150 	/* Allocate a ppgtt and try to fill the entire range */
151 
152 	if (!HAS_PPGTT(dev_priv))
153 		return 0;
154 
155 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
156 	if (IS_ERR(ppgtt))
157 		return PTR_ERR(ppgtt);
158 
159 	if (!ppgtt->vm.allocate_va_range)
160 		goto err_ppgtt_cleanup;
161 
162 	/*
163 	 * While we only allocate the page tables here, and so could
164 	 * address a much larger GTT than we could actually fit into
165 	 * RAM, a practical limit is the number of physical pages in the
166 	 * system. This should ensure that we do not run into the oom-killer
167 	 * during the test and wilfully take down the machine.
168 	 */
169 	limit = totalram_pages() << PAGE_SHIFT;
170 	limit = min(ppgtt->vm.total, limit);
171 
172 	/* Check we can allocate the entire range */
173 	for (size = 4096; size <= limit; size <<= 2) {
174 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
175 		if (err) {
176 			if (err == -ENOMEM) {
177 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
178 					size, ilog2(size));
179 				err = 0; /* virtual space too large! */
180 			}
181 			goto err_ppgtt_cleanup;
182 		}
183 
184 		cond_resched();
185 
186 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
187 	}
188 
189 	/* Check we can incrementally allocate the entire range */
190 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
191 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
192 						  last, size - last);
193 		if (err) {
194 			if (err == -ENOMEM) {
195 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
196 					last, size - last, ilog2(size));
197 				err = 0; /* virtual space too large! */
198 			}
199 			goto err_ppgtt_cleanup;
200 		}
201 
202 		cond_resched();
203 	}
204 
205 err_ppgtt_cleanup:
206 	i915_vm_put(&ppgtt->vm);
207 	return err;
208 }
209 
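/*
 * Exercise the low-level GTT entry points (allocate_va_range,
 * insert_entries, clear_range) directly, using a stand-in vma pointing at
 * a fake object, and filling the hole with ever larger power-of-two chunks
 * in random order.
 */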
210 static int lowlevel_hole(struct i915_address_space *vm,
211 			 u64 hole_start, u64 hole_end,
212 			 unsigned long end_time)
213 {
214 	I915_RND_STATE(seed_prng);
215 	struct i915_vma *mock_vma;
216 	unsigned int size;
217 
218 	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
219 	if (!mock_vma)
220 		return -ENOMEM;
221 
222 	/* Keep creating larger objects until one cannot fit into the hole */
223 	for (size = 12; (hole_end - hole_start) >> size; size++) {
224 		I915_RND_SUBSTATE(prng, seed_prng);
225 		struct drm_i915_gem_object *obj;
226 		unsigned int *order, count, n;
227 		u64 hole_size;
228 
229 		hole_size = (hole_end - hole_start) >> size;
230 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
231 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
232 		count = hole_size >> 1;
233 		if (!count) {
234 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
235 				 __func__, hole_start, hole_end, size, hole_size);
236 			break;
237 		}
238 
239 		do {
240 			order = i915_random_order(count, &prng);
241 			if (order)
242 				break;
243 		} while (count >>= 1);
244 		if (!count) {
245 			kfree(mock_vma);
246 			return -ENOMEM;
247 		}
248 		GEM_BUG_ON(!order);
249 
250 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
251 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
252 
253 		/* Ignore allocation failures (i.e. don't report them as
254 		 * a test failure) as we are purposefully allocating very
255 		 * large objects without checking that we have sufficient
256 		 * memory. We expect to hit -ENOMEM.
257 		 */
258 
259 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
260 		if (IS_ERR(obj)) {
261 			kfree(order);
262 			break;
263 		}
264 
265 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
266 
267 		if (i915_gem_object_pin_pages(obj)) {
268 			i915_gem_object_put(obj);
269 			kfree(order);
270 			break;
271 		}
272 
273 		for (n = 0; n < count; n++) {
274 			u64 addr = hole_start + order[n] * BIT_ULL(size);
275 			intel_wakeref_t wakeref;
276 
277 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
278 
279 			if (igt_timeout(end_time,
280 					"%s timed out before %d/%d\n",
281 					__func__, n, count)) {
282 				hole_end = hole_start; /* quit */
283 				break;
284 			}
285 
286 			if (vm->allocate_va_range &&
287 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
288 				break;
289 
290 			mock_vma->pages = obj->mm.pages;
291 			mock_vma->node.size = BIT_ULL(size);
292 			mock_vma->node.start = addr;
293 
294 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
295 				vm->insert_entries(vm, mock_vma,
296 						   I915_CACHE_NONE, 0);
297 		}
298 		count = n;
299 
300 		i915_random_reorder(order, count, &prng);
301 		for (n = 0; n < count; n++) {
302 			u64 addr = hole_start + order[n] * BIT_ULL(size);
303 			intel_wakeref_t wakeref;
304 
305 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
306 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
307 				vm->clear_range(vm, addr, BIT_ULL(size));
308 		}
309 
310 		i915_gem_object_unpin_pages(obj);
311 		i915_gem_object_put(obj);
312 
313 		kfree(order);
314 
315 		cleanup_freed_objects(vm->i915);
316 	}
317 
318 	kfree(mock_vma);
319 	return 0;
320 }
321 
322 static void close_object_list(struct list_head *objects,
323 			      struct i915_address_space *vm)
324 {
325 	struct drm_i915_gem_object *obj, *on;
326 	int ignored;
327 
328 	list_for_each_entry_safe(obj, on, objects, st_link) {
329 		struct i915_vma *vma;
330 
331 		vma = i915_vma_instance(obj, vm, NULL);
332 		if (!IS_ERR(vma))
333 			ignored = i915_vma_unbind(vma);
334 
335 		list_del(&obj->st_link);
336 		i915_gem_object_put(obj);
337 	}
338 }
339 
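/*
 * Fill the hole with lists of fake objects of varying (prime-scaled)
 * sizes, packing them against either end of the hole, and verify that
 * each vma lands exactly where requested before unbinding it again.
 */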
340 static int fill_hole(struct i915_address_space *vm,
341 		     u64 hole_start, u64 hole_end,
342 		     unsigned long end_time)
343 {
344 	const u64 hole_size = hole_end - hole_start;
345 	struct drm_i915_gem_object *obj;
346 	const unsigned long max_pages =
347 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
348 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
349 	unsigned long npages, prime, flags;
350 	struct i915_vma *vma;
351 	LIST_HEAD(objects);
352 	int err;
353 
354 	/* Try binding many VMAs, working inwards from either edge */
355 
356 	flags = PIN_OFFSET_FIXED | PIN_USER;
357 	if (i915_is_ggtt(vm))
358 		flags |= PIN_GLOBAL;
359 
360 	for_each_prime_number_from(prime, 2, max_step) {
361 		for (npages = 1; npages <= max_pages; npages *= prime) {
362 			const u64 full_size = npages << PAGE_SHIFT;
363 			const struct {
364 				const char *name;
365 				u64 offset;
366 				int step;
367 			} phases[] = {
368 				{ "top-down", hole_end, -1, },
369 				{ "bottom-up", hole_start, 1, },
370 				{ }
371 			}, *p;
372 
373 			obj = fake_dma_object(vm->i915, full_size);
374 			if (IS_ERR(obj))
375 				break;
376 
377 			list_add(&obj->st_link, &objects);
378 
379 			/* Align differing sized objects against the edges, and
380 			 * check we don't walk off into the void when binding
381 			 * them into the GTT.
382 			 */
383 			for (p = phases; p->name; p++) {
384 				u64 offset;
385 
386 				offset = p->offset;
387 				list_for_each_entry(obj, &objects, st_link) {
388 					vma = i915_vma_instance(obj, vm, NULL);
389 					if (IS_ERR(vma))
390 						continue;
391 
392 					if (p->step < 0) {
393 						if (offset < hole_start + obj->base.size)
394 							break;
395 						offset -= obj->base.size;
396 					}
397 
398 					err = i915_vma_pin(vma, 0, 0, offset | flags);
399 					if (err) {
400 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
401 						       __func__, p->name, err, npages, prime, offset);
402 						goto err;
403 					}
404 
405 					if (!drm_mm_node_allocated(&vma->node) ||
406 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
407 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
408 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
409 						       offset);
410 						err = -EINVAL;
411 						goto err;
412 					}
413 
414 					i915_vma_unpin(vma);
415 
416 					if (p->step > 0) {
417 						if (offset + obj->base.size > hole_end)
418 							break;
419 						offset += obj->base.size;
420 					}
421 				}
422 
423 				offset = p->offset;
424 				list_for_each_entry(obj, &objects, st_link) {
425 					vma = i915_vma_instance(obj, vm, NULL);
426 					if (IS_ERR(vma))
427 						continue;
428 
429 					if (p->step < 0) {
430 						if (offset < hole_start + obj->base.size)
431 							break;
432 						offset -= obj->base.size;
433 					}
434 
435 					if (!drm_mm_node_allocated(&vma->node) ||
436 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
437 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
438 						       __func__, p->name, vma->node.start, vma->node.size,
439 						       offset);
440 						err = -EINVAL;
441 						goto err;
442 					}
443 
444 					err = i915_vma_unbind(vma);
445 					if (err) {
446 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
447 						       __func__, p->name, vma->node.start, vma->node.size,
448 						       err);
449 						goto err;
450 					}
451 
452 					if (p->step > 0) {
453 						if (offset + obj->base.size > hole_end)
454 							break;
455 						offset += obj->base.size;
456 					}
457 				}
458 
459 				offset = p->offset;
460 				list_for_each_entry_reverse(obj, &objects, st_link) {
461 					vma = i915_vma_instance(obj, vm, NULL);
462 					if (IS_ERR(vma))
463 						continue;
464 
465 					if (p->step < 0) {
466 						if (offset < hole_start + obj->base.size)
467 							break;
468 						offset -= obj->base.size;
469 					}
470 
471 					err = i915_vma_pin(vma, 0, 0, offset | flags);
472 					if (err) {
473 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
474 						       __func__, p->name, err, npages, prime, offset);
475 						goto err;
476 					}
477 
478 					if (!drm_mm_node_allocated(&vma->node) ||
479 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
480 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
481 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
482 						       offset);
483 						err = -EINVAL;
484 						goto err;
485 					}
486 
487 					i915_vma_unpin(vma);
488 
489 					if (p->step > 0) {
490 						if (offset + obj->base.size > hole_end)
491 							break;
492 						offset += obj->base.size;
493 					}
494 				}
495 
496 				offset = p->offset;
497 				list_for_each_entry_reverse(obj, &objects, st_link) {
498 					vma = i915_vma_instance(obj, vm, NULL);
499 					if (IS_ERR(vma))
500 						continue;
501 
502 					if (p->step < 0) {
503 						if (offset < hole_start + obj->base.size)
504 							break;
505 						offset -= obj->base.size;
506 					}
507 
508 					if (!drm_mm_node_allocated(&vma->node) ||
509 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
510 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
511 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
512 						       offset);
513 						err = -EINVAL;
514 						goto err;
515 					}
516 
517 					err = i915_vma_unbind(vma);
518 					if (err) {
519 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
520 						       __func__, p->name, vma->node.start, vma->node.size,
521 						       err);
522 						goto err;
523 					}
524 
525 					if (p->step > 0) {
526 						if (offset + obj->base.size > hole_end)
527 							break;
528 						offset += obj->base.size;
529 					}
530 				}
531 			}
532 
533 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
534 					__func__, npages, prime)) {
535 				err = -EINTR;
536 				goto err;
537 			}
538 		}
539 
540 		close_object_list(&objects, vm);
541 		cleanup_freed_objects(vm->i915);
542 	}
543 
544 	return 0;
545 
546 err:
547 	close_object_list(&objects, vm);
548 	return err;
549 }
550 
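/*
 * Walk a single vma of each (prime) size across the hole, binding it at
 * every successive offset and checking it was placed where we asked.
 */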
551 static int walk_hole(struct i915_address_space *vm,
552 		     u64 hole_start, u64 hole_end,
553 		     unsigned long end_time)
554 {
555 	const u64 hole_size = hole_end - hole_start;
556 	const unsigned long max_pages =
557 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
558 	unsigned long flags;
559 	u64 size;
560 
561 	/* Try binding a single VMA in different positions within the hole */
562 
563 	flags = PIN_OFFSET_FIXED | PIN_USER;
564 	if (i915_is_ggtt(vm))
565 		flags |= PIN_GLOBAL;
566 
567 	for_each_prime_number_from(size, 1, max_pages) {
568 		struct drm_i915_gem_object *obj;
569 		struct i915_vma *vma;
570 		u64 addr;
571 		int err = 0;
572 
573 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
574 		if (IS_ERR(obj))
575 			break;
576 
577 		vma = i915_vma_instance(obj, vm, NULL);
578 		if (IS_ERR(vma)) {
579 			err = PTR_ERR(vma);
580 			goto err_put;
581 		}
582 
583 		for (addr = hole_start;
584 		     addr + obj->base.size < hole_end;
585 		     addr += obj->base.size) {
586 			err = i915_vma_pin(vma, 0, 0, addr | flags);
587 			if (err) {
588 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
589 				       __func__, addr, vma->size,
590 				       hole_start, hole_end, err);
591 				goto err_put;
592 			}
593 			i915_vma_unpin(vma);
594 
595 			if (!drm_mm_node_allocated(&vma->node) ||
596 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
597 				pr_err("%s incorrect at %llx + %llx\n",
598 				       __func__, addr, vma->size);
599 				err = -EINVAL;
600 				goto err_put;
601 			}
602 
603 			err = i915_vma_unbind(vma);
604 			if (err) {
605 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
606 				       __func__, addr, vma->size, err);
607 				goto err_put;
608 			}
609 
610 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
611 
612 			if (igt_timeout(end_time,
613 					"%s timed out at %llx\n",
614 					__func__, addr)) {
615 				err = -EINTR;
616 				goto err_put;
617 			}
618 		}
619 
620 err_put:
621 		i915_gem_object_put(obj);
622 		if (err)
623 			return err;
624 
625 		cleanup_freed_objects(vm->i915);
626 	}
627 
628 	return 0;
629 }
630 
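/*
 * Bind a two-page object so that it straddles every power-of-two boundary
 * within the hole, checking the fixed-offset placement each time.
 */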
631 static int pot_hole(struct i915_address_space *vm,
632 		    u64 hole_start, u64 hole_end,
633 		    unsigned long end_time)
634 {
635 	struct drm_i915_gem_object *obj;
636 	struct i915_vma *vma;
637 	unsigned long flags;
638 	unsigned int pot;
639 	int err = 0;
640 
641 	flags = PIN_OFFSET_FIXED | PIN_USER;
642 	if (i915_is_ggtt(vm))
643 		flags |= PIN_GLOBAL;
644 
645 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
646 	if (IS_ERR(obj))
647 		return PTR_ERR(obj);
648 
649 	vma = i915_vma_instance(obj, vm, NULL);
650 	if (IS_ERR(vma)) {
651 		err = PTR_ERR(vma);
652 		goto err_obj;
653 	}
654 
655 	/* Insert a pair of pages across every pot boundary within the hole */
656 	for (pot = fls64(hole_end - 1) - 1;
657 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
658 	     pot--) {
659 		u64 step = BIT_ULL(pot);
660 		u64 addr;
661 
662 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
663 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
664 		     addr += step) {
665 			err = i915_vma_pin(vma, 0, 0, addr | flags);
666 			if (err) {
667 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
668 				       __func__,
669 				       addr,
670 				       hole_start, hole_end,
671 				       err);
672 				goto err_obj;
673 			}
674 
675 			if (!drm_mm_node_allocated(&vma->node) ||
676 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
677 				pr_err("%s incorrect at %llx + %llx\n",
678 				       __func__, addr, vma->size);
679 				i915_vma_unpin(vma);
680 				err = i915_vma_unbind(vma);
681 				err = -EINVAL;
682 				goto err_obj;
683 			}
684 
685 			i915_vma_unpin(vma);
686 			err = i915_vma_unbind(vma);
687 			GEM_BUG_ON(err);
688 		}
689 
690 		if (igt_timeout(end_time,
691 				"%s timed out after %d/%d\n",
692 				__func__, pot, fls64(hole_end - 1) - 1)) {
693 			err = -EINTR;
694 			goto err_obj;
695 		}
696 	}
697 
698 err_obj:
699 	i915_gem_object_put(obj);
700 	return err;
701 }
702 
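/*
 * Like lowlevel_hole, but go through the normal vma pin/unbind paths:
 * bind a fake object of each power-of-two size at pseudo-random offsets
 * throughout the hole.
 */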
703 static int drunk_hole(struct i915_address_space *vm,
704 		      u64 hole_start, u64 hole_end,
705 		      unsigned long end_time)
706 {
707 	I915_RND_STATE(prng);
708 	unsigned int size;
709 	unsigned long flags;
710 
711 	flags = PIN_OFFSET_FIXED | PIN_USER;
712 	if (i915_is_ggtt(vm))
713 		flags |= PIN_GLOBAL;
714 
715 	/* Keep creating larger objects until one cannot fit into the hole */
716 	for (size = 12; (hole_end - hole_start) >> size; size++) {
717 		struct drm_i915_gem_object *obj;
718 		unsigned int *order, count, n;
719 		struct i915_vma *vma;
720 		u64 hole_size;
721 		int err = -ENODEV;
722 
723 		hole_size = (hole_end - hole_start) >> size;
724 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
725 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
726 		count = hole_size >> 1;
727 		if (!count) {
728 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
729 				 __func__, hole_start, hole_end, size, hole_size);
730 			break;
731 		}
732 
733 		do {
734 			order = i915_random_order(count, &prng);
735 			if (order)
736 				break;
737 		} while (count >>= 1);
738 		if (!count)
739 			return -ENOMEM;
740 		GEM_BUG_ON(!order);
741 
742 		/* Ignore allocation failures (i.e. don't report them as
743 		 * a test failure) as we are purposefully allocating very
744 		 * large objects without checking that we have sufficient
745 		 * memory. We expect to hit -ENOMEM.
746 		 */
747 
748 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
749 		if (IS_ERR(obj)) {
750 			kfree(order);
751 			break;
752 		}
753 
754 		vma = i915_vma_instance(obj, vm, NULL);
755 		if (IS_ERR(vma)) {
756 			err = PTR_ERR(vma);
757 			goto err_obj;
758 		}
759 
760 		GEM_BUG_ON(vma->size != BIT_ULL(size));
761 
762 		for (n = 0; n < count; n++) {
763 			u64 addr = hole_start + order[n] * BIT_ULL(size);
764 
765 			err = i915_vma_pin(vma, 0, 0, addr | flags);
766 			if (err) {
767 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
768 				       __func__,
769 				       addr, BIT_ULL(size),
770 				       hole_start, hole_end,
771 				       err);
772 				goto err_obj;
773 			}
774 
775 			if (!drm_mm_node_allocated(&vma->node) ||
776 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
777 				pr_err("%s incorrect at %llx + %llx\n",
778 				       __func__, addr, BIT_ULL(size));
779 				i915_vma_unpin(vma);
780 				err = i915_vma_unbind(vma);
781 				err = -EINVAL;
782 				goto err_obj;
783 			}
784 
785 			i915_vma_unpin(vma);
786 			err = i915_vma_unbind(vma);
787 			GEM_BUG_ON(err);
788 
789 			if (igt_timeout(end_time,
790 					"%s timed out after %d/%d\n",
791 					__func__, n, count)) {
792 				err = -EINTR;
793 				goto err_obj;
794 			}
795 		}
796 
797 err_obj:
798 		i915_gem_object_put(obj);
799 		kfree(order);
800 		if (err)
801 			return err;
802 
803 		cleanup_freed_objects(vm->i915);
804 	}
805 
806 	return 0;
807 }
808 
809 static int __shrink_hole(struct i915_address_space *vm,
810 			 u64 hole_start, u64 hole_end,
811 			 unsigned long end_time)
812 {
813 	struct drm_i915_gem_object *obj;
814 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
815 	unsigned int order = 12;
816 	LIST_HEAD(objects);
817 	int err = 0;
818 	u64 addr;
819 
820 	/* Keep creating larger objects until one cannot fit into the hole */
821 	for (addr = hole_start; addr < hole_end; ) {
822 		struct i915_vma *vma;
823 		u64 size = BIT_ULL(order++);
824 
825 		size = min(size, hole_end - addr);
826 		obj = fake_dma_object(vm->i915, size);
827 		if (IS_ERR(obj)) {
828 			err = PTR_ERR(obj);
829 			break;
830 		}
831 
832 		list_add(&obj->st_link, &objects);
833 
834 		vma = i915_vma_instance(obj, vm, NULL);
835 		if (IS_ERR(vma)) {
836 			err = PTR_ERR(vma);
837 			break;
838 		}
839 
840 		GEM_BUG_ON(vma->size != size);
841 
842 		err = i915_vma_pin(vma, 0, 0, addr | flags);
843 		if (err) {
844 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
845 			       __func__, addr, size, hole_start, hole_end, err);
846 			break;
847 		}
848 
849 		if (!drm_mm_node_allocated(&vma->node) ||
850 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
851 			pr_err("%s incorrect at %llx + %llx\n",
852 			       __func__, addr, size);
853 			i915_vma_unpin(vma);
854 			err = i915_vma_unbind(vma);
855 			err = -EINVAL;
856 			break;
857 		}
858 
859 		i915_vma_unpin(vma);
860 		addr += size;
861 
862 		/*
863 		 * Since we are injecting allocation faults at random intervals,
864 		 * wait for this allocation to complete before we change the
865 		 * fault injection.
866 		 */
867 		err = i915_vma_sync(vma);
868 		if (err)
869 			break;
870 
871 		if (igt_timeout(end_time,
872 				"%s timed out at offset %llx [%llx - %llx]\n",
873 				__func__, addr, hole_start, hole_end)) {
874 			err = -EINTR;
875 			break;
876 		}
877 	}
878 
879 	close_object_list(&objects, vm);
880 	cleanup_freed_objects(vm->i915);
881 	return err;
882 }
883 
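/*
 * Rerun __shrink_hole while injecting allocation failures into the
 * address space at every prime interval, exercising the error paths of
 * the page-table allocators.
 */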
884 static int shrink_hole(struct i915_address_space *vm,
885 		       u64 hole_start, u64 hole_end,
886 		       unsigned long end_time)
887 {
888 	unsigned long prime;
889 	int err;
890 
891 	vm->fault_attr.probability = 999;
892 	atomic_set(&vm->fault_attr.times, -1);
893 
894 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
895 		vm->fault_attr.interval = prime;
896 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
897 		if (err)
898 			break;
899 	}
900 
901 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
902 
903 	return err;
904 }
905 
906 static int shrink_boom(struct i915_address_space *vm,
907 		       u64 hole_start, u64 hole_end,
908 		       unsigned long end_time)
909 {
910 	unsigned int sizes[] = { SZ_2M, SZ_1G };
911 	struct drm_i915_gem_object *purge;
912 	struct drm_i915_gem_object *explode;
913 	int err;
914 	int i;
915 
916 	/*
917 	 * Catch the case which shrink_hole seems to miss. The setup here
918 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
919 	 * ensuring that all vma associated with the respective pd/pdp are
920 	 * unpinned at the time.
921 	 */
922 
923 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
924 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
925 		unsigned int size = sizes[i];
926 		struct i915_vma *vma;
927 
928 		purge = fake_dma_object(vm->i915, size);
929 		if (IS_ERR(purge))
930 			return PTR_ERR(purge);
931 
932 		vma = i915_vma_instance(purge, vm, NULL);
933 		if (IS_ERR(vma)) {
934 			err = PTR_ERR(vma);
935 			goto err_purge;
936 		}
937 
938 		err = i915_vma_pin(vma, 0, 0, flags);
939 		if (err)
940 			goto err_purge;
941 
942 		/* Should now be ripe for purging */
943 		i915_vma_unpin(vma);
944 
945 		explode = fake_dma_object(vm->i915, size);
946 		if (IS_ERR(explode)) {
947 			err = PTR_ERR(explode);
948 			goto err_purge;
949 		}
950 
951 		vm->fault_attr.probability = 100;
952 		vm->fault_attr.interval = 1;
953 		atomic_set(&vm->fault_attr.times, -1);
954 
955 		vma = i915_vma_instance(explode, vm, NULL);
956 		if (IS_ERR(vma)) {
957 			err = PTR_ERR(vma);
958 			goto err_explode;
959 		}
960 
961 		err = i915_vma_pin(vma, 0, 0, flags | size);
962 		if (err)
963 			goto err_explode;
964 
965 		i915_vma_unpin(vma);
966 
967 		i915_gem_object_put(purge);
968 		i915_gem_object_put(explode);
969 
970 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
971 		cleanup_freed_objects(vm->i915);
972 	}
973 
974 	return 0;
975 
976 err_explode:
977 	i915_gem_object_put(explode);
978 err_purge:
979 	i915_gem_object_put(purge);
980 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
981 	return err;
982 }
983 
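/*
 * Create a full ppgtt and run the given hole exerciser over its entire
 * virtual address range.
 */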
984 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
985 			  int (*func)(struct i915_address_space *vm,
986 				      u64 hole_start, u64 hole_end,
987 				      unsigned long end_time))
988 {
989 	struct i915_ppgtt *ppgtt;
990 	IGT_TIMEOUT(end_time);
991 	struct file *file;
992 	int err;
993 
994 	if (!HAS_FULL_PPGTT(dev_priv))
995 		return 0;
996 
997 	file = mock_file(dev_priv);
998 	if (IS_ERR(file))
999 		return PTR_ERR(file);
1000 
1001 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
1002 	if (IS_ERR(ppgtt)) {
1003 		err = PTR_ERR(ppgtt);
1004 		goto out_free;
1005 	}
1006 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1007 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1008 
1009 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1010 
1011 	i915_vm_put(&ppgtt->vm);
1012 
1013 out_free:
1014 	fput(file);
1015 	return err;
1016 }
1017 
1018 static int igt_ppgtt_fill(void *arg)
1019 {
1020 	return exercise_ppgtt(arg, fill_hole);
1021 }
1022 
1023 static int igt_ppgtt_walk(void *arg)
1024 {
1025 	return exercise_ppgtt(arg, walk_hole);
1026 }
1027 
1028 static int igt_ppgtt_pot(void *arg)
1029 {
1030 	return exercise_ppgtt(arg, pot_hole);
1031 }
1032 
1033 static int igt_ppgtt_drunk(void *arg)
1034 {
1035 	return exercise_ppgtt(arg, drunk_hole);
1036 }
1037 
1038 static int igt_ppgtt_lowlevel(void *arg)
1039 {
1040 	return exercise_ppgtt(arg, lowlevel_hole);
1041 }
1042 
1043 static int igt_ppgtt_shrink(void *arg)
1044 {
1045 	return exercise_ppgtt(arg, shrink_hole);
1046 }
1047 
1048 static int igt_ppgtt_shrink_boom(void *arg)
1049 {
1050 	return exercise_ppgtt(arg, shrink_boom);
1051 }
1052 
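/* list_sort() comparator: order the drm_mm holes by ascending start address */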
1053 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1054 {
1055 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1056 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1057 
1058 	if (a->start < b->start)
1059 		return -1;
1060 	else
1061 		return 1;
1062 }
1063 
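/*
 * Run the given exerciser over every hole currently present in the live
 * GGTT, restarting the walk after each hole since the exerciser may have
 * modified the drm_mm behind us.
 */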
1064 static int exercise_ggtt(struct drm_i915_private *i915,
1065 			 int (*func)(struct i915_address_space *vm,
1066 				     u64 hole_start, u64 hole_end,
1067 				     unsigned long end_time))
1068 {
1069 	struct i915_ggtt *ggtt = &i915->ggtt;
1070 	u64 hole_start, hole_end, last = 0;
1071 	struct drm_mm_node *node;
1072 	IGT_TIMEOUT(end_time);
1073 	int err = 0;
1074 
1075 restart:
1076 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1077 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1078 		if (hole_start < last)
1079 			continue;
1080 
1081 		if (ggtt->vm.mm.color_adjust)
1082 			ggtt->vm.mm.color_adjust(node, 0,
1083 						 &hole_start, &hole_end);
1084 		if (hole_start >= hole_end)
1085 			continue;
1086 
1087 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1088 		if (err)
1089 			break;
1090 
1091 		/* As we have manipulated the drm_mm, the list may be corrupt */
1092 		last = hole_end;
1093 		goto restart;
1094 	}
1095 
1096 	return err;
1097 }
1098 
1099 static int igt_ggtt_fill(void *arg)
1100 {
1101 	return exercise_ggtt(arg, fill_hole);
1102 }
1103 
1104 static int igt_ggtt_walk(void *arg)
1105 {
1106 	return exercise_ggtt(arg, walk_hole);
1107 }
1108 
1109 static int igt_ggtt_pot(void *arg)
1110 {
1111 	return exercise_ggtt(arg, pot_hole);
1112 }
1113 
1114 static int igt_ggtt_drunk(void *arg)
1115 {
1116 	return exercise_ggtt(arg, drunk_hole);
1117 }
1118 
1119 static int igt_ggtt_lowlevel(void *arg)
1120 {
1121 	return exercise_ggtt(arg, lowlevel_hole);
1122 }
1123 
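/*
 * Reserve a chunk of the mappable aperture, insert the same physical page
 * at each GGTT offset within it, then write and read back through the
 * aperture in random order to check that insert_page() mapped each offset
 * correctly.
 */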
1124 static int igt_ggtt_page(void *arg)
1125 {
1126 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1127 	I915_RND_STATE(prng);
1128 	struct drm_i915_private *i915 = arg;
1129 	struct i915_ggtt *ggtt = &i915->ggtt;
1130 	struct drm_i915_gem_object *obj;
1131 	intel_wakeref_t wakeref;
1132 	struct drm_mm_node tmp;
1133 	unsigned int *order, n;
1134 	int err;
1135 
1136 	if (!i915_ggtt_has_aperture(ggtt))
1137 		return 0;
1138 
1139 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1140 	if (IS_ERR(obj))
1141 		return PTR_ERR(obj);
1142 
1143 	err = i915_gem_object_pin_pages(obj);
1144 	if (err)
1145 		goto out_free;
1146 
1147 	memset(&tmp, 0, sizeof(tmp));
1148 	mutex_lock(&ggtt->vm.mutex);
1149 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1150 					  count * PAGE_SIZE, 0,
1151 					  I915_COLOR_UNEVICTABLE,
1152 					  0, ggtt->mappable_end,
1153 					  DRM_MM_INSERT_LOW);
1154 	mutex_unlock(&ggtt->vm.mutex);
1155 	if (err)
1156 		goto out_unpin;
1157 
1158 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1159 
1160 	for (n = 0; n < count; n++) {
1161 		u64 offset = tmp.start + n * PAGE_SIZE;
1162 
1163 		ggtt->vm.insert_page(&ggtt->vm,
1164 				     i915_gem_object_get_dma_address(obj, 0),
1165 				     offset, I915_CACHE_NONE, 0);
1166 	}
1167 
1168 	order = i915_random_order(count, &prng);
1169 	if (!order) {
1170 		err = -ENOMEM;
1171 		goto out_remove;
1172 	}
1173 
1174 	for (n = 0; n < count; n++) {
1175 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1176 		u32 __iomem *vaddr;
1177 
1178 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1179 		iowrite32(n, vaddr + n);
1180 		io_mapping_unmap_atomic(vaddr);
1181 	}
1182 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1183 
1184 	i915_random_reorder(order, count, &prng);
1185 	for (n = 0; n < count; n++) {
1186 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1187 		u32 __iomem *vaddr;
1188 		u32 val;
1189 
1190 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1191 		val = ioread32(vaddr + n);
1192 		io_mapping_unmap_atomic(vaddr);
1193 
1194 		if (val != n) {
1195 			pr_err("insert page failed: found %d, expected %d\n",
1196 			       val, n);
1197 			err = -EINVAL;
1198 			break;
1199 		}
1200 	}
1201 
1202 	kfree(order);
1203 out_remove:
1204 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1205 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1206 	mutex_lock(&ggtt->vm.mutex);
1207 	drm_mm_remove_node(&tmp);
1208 	mutex_unlock(&ggtt->vm.mutex);
1209 out_unpin:
1210 	i915_gem_object_unpin_pages(obj);
1211 out_free:
1212 	i915_gem_object_put(obj);
1213 	return err;
1214 }
1215 
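/*
 * Fake the bookkeeping normally done by a real bind: pin the backing
 * pages, point the vma at them and place it on the address space's bound
 * list, without going through the full binding machinery.
 */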
1216 static void track_vma_bind(struct i915_vma *vma)
1217 {
1218 	struct drm_i915_gem_object *obj = vma->obj;
1219 
1220 	__i915_gem_object_pin_pages(obj);
1221 
1222 	GEM_BUG_ON(vma->pages);
1223 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1224 	__i915_gem_object_pin_pages(obj);
1225 	vma->pages = obj->mm.pages;
1226 
1227 	mutex_lock(&vma->vm->mutex);
1228 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1229 	mutex_unlock(&vma->vm->mutex);
1230 }
1231 
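/*
 * Run the given exerciser against the ppgtt of a mock context, clamping
 * the range to the amount of RAM available.
 */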
1232 static int exercise_mock(struct drm_i915_private *i915,
1233 			 int (*func)(struct i915_address_space *vm,
1234 				     u64 hole_start, u64 hole_end,
1235 				     unsigned long end_time))
1236 {
1237 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1238 	struct i915_address_space *vm;
1239 	struct i915_gem_context *ctx;
1240 	IGT_TIMEOUT(end_time);
1241 	int err;
1242 
1243 	ctx = mock_context(i915, "mock");
1244 	if (!ctx)
1245 		return -ENOMEM;
1246 
1247 	vm = i915_gem_context_get_vm_rcu(ctx);
1248 	err = func(vm, 0, min(vm->total, limit), end_time);
1249 	i915_vm_put(vm);
1250 
1251 	mock_context_close(ctx);
1252 	return err;
1253 }
1254 
1255 static int igt_mock_fill(void *arg)
1256 {
1257 	struct i915_ggtt *ggtt = arg;
1258 
1259 	return exercise_mock(ggtt->vm.i915, fill_hole);
1260 }
1261 
1262 static int igt_mock_walk(void *arg)
1263 {
1264 	struct i915_ggtt *ggtt = arg;
1265 
1266 	return exercise_mock(ggtt->vm.i915, walk_hole);
1267 }
1268 
1269 static int igt_mock_pot(void *arg)
1270 {
1271 	struct i915_ggtt *ggtt = arg;
1272 
1273 	return exercise_mock(ggtt->vm.i915, pot_hole);
1274 }
1275 
1276 static int igt_mock_drunk(void *arg)
1277 {
1278 	struct i915_ggtt *ggtt = arg;
1279 
1280 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1281 }
1282 
1283 static int igt_gtt_reserve(void *arg)
1284 {
1285 	struct i915_ggtt *ggtt = arg;
1286 	struct drm_i915_gem_object *obj, *on;
1287 	I915_RND_STATE(prng);
1288 	LIST_HEAD(objects);
1289 	u64 total;
1290 	int err = -ENODEV;
1291 
1292 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1293 	 * for the node, and evicts if it has to. So our test checks that
1294 	 * it can give us the requested space and prevent overlaps.
1295 	 */
1296 
1297 	/* Start by filling the GGTT */
1298 	for (total = 0;
1299 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1300 	     total += 2 * I915_GTT_PAGE_SIZE) {
1301 		struct i915_vma *vma;
1302 
1303 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1304 						      2 * PAGE_SIZE);
1305 		if (IS_ERR(obj)) {
1306 			err = PTR_ERR(obj);
1307 			goto out;
1308 		}
1309 
1310 		err = i915_gem_object_pin_pages(obj);
1311 		if (err) {
1312 			i915_gem_object_put(obj);
1313 			goto out;
1314 		}
1315 
1316 		list_add(&obj->st_link, &objects);
1317 
1318 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1319 		if (IS_ERR(vma)) {
1320 			err = PTR_ERR(vma);
1321 			goto out;
1322 		}
1323 
1324 		mutex_lock(&ggtt->vm.mutex);
1325 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1326 					   obj->base.size,
1327 					   total,
1328 					   obj->cache_level,
1329 					   0);
1330 		mutex_unlock(&ggtt->vm.mutex);
1331 		if (err) {
1332 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1333 			       total, ggtt->vm.total, err);
1334 			goto out;
1335 		}
1336 		track_vma_bind(vma);
1337 
1338 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1339 		if (vma->node.start != total ||
1340 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1341 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1342 			       vma->node.start, vma->node.size,
1343 			       total, 2*I915_GTT_PAGE_SIZE);
1344 			err = -EINVAL;
1345 			goto out;
1346 		}
1347 	}
1348 
1349 	/* Now we start forcing evictions */
1350 	for (total = I915_GTT_PAGE_SIZE;
1351 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1352 	     total += 2 * I915_GTT_PAGE_SIZE) {
1353 		struct i915_vma *vma;
1354 
1355 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1356 						      2 * PAGE_SIZE);
1357 		if (IS_ERR(obj)) {
1358 			err = PTR_ERR(obj);
1359 			goto out;
1360 		}
1361 
1362 		err = i915_gem_object_pin_pages(obj);
1363 		if (err) {
1364 			i915_gem_object_put(obj);
1365 			goto out;
1366 		}
1367 
1368 		list_add(&obj->st_link, &objects);
1369 
1370 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1371 		if (IS_ERR(vma)) {
1372 			err = PTR_ERR(vma);
1373 			goto out;
1374 		}
1375 
1376 		mutex_lock(&ggtt->vm.mutex);
1377 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1378 					   obj->base.size,
1379 					   total,
1380 					   obj->cache_level,
1381 					   0);
1382 		mutex_unlock(&ggtt->vm.mutex);
1383 		if (err) {
1384 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1385 			       total, ggtt->vm.total, err);
1386 			goto out;
1387 		}
1388 		track_vma_bind(vma);
1389 
1390 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1391 		if (vma->node.start != total ||
1392 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1393 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1394 			       vma->node.start, vma->node.size,
1395 			       total, 2*I915_GTT_PAGE_SIZE);
1396 			err = -EINVAL;
1397 			goto out;
1398 		}
1399 	}
1400 
1401 	/* And then try at random */
1402 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1403 		struct i915_vma *vma;
1404 		u64 offset;
1405 
1406 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1407 		if (IS_ERR(vma)) {
1408 			err = PTR_ERR(vma);
1409 			goto out;
1410 		}
1411 
1412 		err = i915_vma_unbind(vma);
1413 		if (err) {
1414 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1415 			goto out;
1416 		}
1417 
1418 		offset = igt_random_offset(&prng,
1419 					   0, ggtt->vm.total,
1420 					   2 * I915_GTT_PAGE_SIZE,
1421 					   I915_GTT_MIN_ALIGNMENT);
1422 
1423 		mutex_lock(&ggtt->vm.mutex);
1424 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1425 					   obj->base.size,
1426 					   offset,
1427 					   obj->cache_level,
1428 					   0);
1429 		mutex_unlock(&ggtt->vm.mutex);
1430 		if (err) {
1431 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1432 			       total, ggtt->vm.total, err);
1433 			goto out;
1434 		}
1435 		track_vma_bind(vma);
1436 
1437 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1438 		if (vma->node.start != offset ||
1439 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1440 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1441 			       vma->node.start, vma->node.size,
1442 			       offset, 2*I915_GTT_PAGE_SIZE);
1443 			err = -EINVAL;
1444 			goto out;
1445 		}
1446 	}
1447 
1448 out:
1449 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1450 		i915_gem_object_unpin_pages(obj);
1451 		i915_gem_object_put(obj);
1452 	}
1453 	return err;
1454 }
1455 
1456 static int igt_gtt_insert(void *arg)
1457 {
1458 	struct i915_ggtt *ggtt = arg;
1459 	struct drm_i915_gem_object *obj, *on;
1460 	struct drm_mm_node tmp = {};
1461 	const struct invalid_insert {
1462 		u64 size;
1463 		u64 alignment;
1464 		u64 start, end;
1465 	} invalid_insert[] = {
1466 		{
1467 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1468 			0, ggtt->vm.total,
1469 		},
1470 		{
1471 			2*I915_GTT_PAGE_SIZE, 0,
1472 			0, I915_GTT_PAGE_SIZE,
1473 		},
1474 		{
1475 			-(u64)I915_GTT_PAGE_SIZE, 0,
1476 			0, 4*I915_GTT_PAGE_SIZE,
1477 		},
1478 		{
1479 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1480 			0, 4*I915_GTT_PAGE_SIZE,
1481 		},
1482 		{
1483 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1484 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1485 		},
1486 		{}
1487 	}, *ii;
1488 	LIST_HEAD(objects);
1489 	u64 total;
1490 	int err = -ENODEV;
1491 
1492 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1493 	 * for the node, evicting if required.
1494 	 */
1495 
1496 	/* Check a couple of obviously invalid requests */
1497 	for (ii = invalid_insert; ii->size; ii++) {
1498 		mutex_lock(&ggtt->vm.mutex);
1499 		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1500 					  ii->size, ii->alignment,
1501 					  I915_COLOR_UNEVICTABLE,
1502 					  ii->start, ii->end,
1503 					  0);
1504 		mutex_unlock(&ggtt->vm.mutex);
1505 		if (err != -ENOSPC) {
1506 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1507 			       ii->size, ii->alignment, ii->start, ii->end,
1508 			       err);
1509 			return -EINVAL;
1510 		}
1511 	}
1512 
1513 	/* Start by filling the GGTT */
1514 	for (total = 0;
1515 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1516 	     total += I915_GTT_PAGE_SIZE) {
1517 		struct i915_vma *vma;
1518 
1519 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1520 						      I915_GTT_PAGE_SIZE);
1521 		if (IS_ERR(obj)) {
1522 			err = PTR_ERR(obj);
1523 			goto out;
1524 		}
1525 
1526 		err = i915_gem_object_pin_pages(obj);
1527 		if (err) {
1528 			i915_gem_object_put(obj);
1529 			goto out;
1530 		}
1531 
1532 		list_add(&obj->st_link, &objects);
1533 
1534 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1535 		if (IS_ERR(vma)) {
1536 			err = PTR_ERR(vma);
1537 			goto out;
1538 		}
1539 
1540 		mutex_lock(&ggtt->vm.mutex);
1541 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1542 					  obj->base.size, 0, obj->cache_level,
1543 					  0, ggtt->vm.total,
1544 					  0);
1545 		mutex_unlock(&ggtt->vm.mutex);
1546 		if (err == -ENOSPC) {
1547 			/* maxed out the GGTT space */
1548 			i915_gem_object_put(obj);
1549 			break;
1550 		}
1551 		if (err) {
1552 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1553 			       total, ggtt->vm.total, err);
1554 			goto out;
1555 		}
1556 		track_vma_bind(vma);
1557 		__i915_vma_pin(vma);
1558 
1559 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1560 	}
1561 
1562 	list_for_each_entry(obj, &objects, st_link) {
1563 		struct i915_vma *vma;
1564 
1565 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1566 		if (IS_ERR(vma)) {
1567 			err = PTR_ERR(vma);
1568 			goto out;
1569 		}
1570 
1571 		if (!drm_mm_node_allocated(&vma->node)) {
1572 			pr_err("VMA was unexpectedly evicted!\n");
1573 			err = -EINVAL;
1574 			goto out;
1575 		}
1576 
1577 		__i915_vma_unpin(vma);
1578 	}
1579 
1580 	/* If we then reinsert, we should find the same hole */
1581 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1582 		struct i915_vma *vma;
1583 		u64 offset;
1584 
1585 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1586 		if (IS_ERR(vma)) {
1587 			err = PTR_ERR(vma);
1588 			goto out;
1589 		}
1590 
1591 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1592 		offset = vma->node.start;
1593 
1594 		err = i915_vma_unbind(vma);
1595 		if (err) {
1596 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1597 			goto out;
1598 		}
1599 
1600 		mutex_lock(&ggtt->vm.mutex);
1601 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1602 					  obj->base.size, 0, obj->cache_level,
1603 					  0, ggtt->vm.total,
1604 					  0);
1605 		mutex_unlock(&ggtt->vm.mutex);
1606 		if (err) {
1607 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1608 			       total, ggtt->vm.total, err);
1609 			goto out;
1610 		}
1611 		track_vma_bind(vma);
1612 
1613 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1614 		if (vma->node.start != offset) {
1615 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1616 			       offset, vma->node.start);
1617 			err = -EINVAL;
1618 			goto out;
1619 		}
1620 	}
1621 
1622 	/* And then force evictions */
1623 	for (total = 0;
1624 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1625 	     total += 2 * I915_GTT_PAGE_SIZE) {
1626 		struct i915_vma *vma;
1627 
1628 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1629 						      2 * I915_GTT_PAGE_SIZE);
1630 		if (IS_ERR(obj)) {
1631 			err = PTR_ERR(obj);
1632 			goto out;
1633 		}
1634 
1635 		err = i915_gem_object_pin_pages(obj);
1636 		if (err) {
1637 			i915_gem_object_put(obj);
1638 			goto out;
1639 		}
1640 
1641 		list_add(&obj->st_link, &objects);
1642 
1643 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1644 		if (IS_ERR(vma)) {
1645 			err = PTR_ERR(vma);
1646 			goto out;
1647 		}
1648 
1649 		mutex_lock(&ggtt->vm.mutex);
1650 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1651 					  obj->base.size, 0, obj->cache_level,
1652 					  0, ggtt->vm.total,
1653 					  0);
1654 		mutex_unlock(&ggtt->vm.mutex);
1655 		if (err) {
1656 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1657 			       total, ggtt->vm.total, err);
1658 			goto out;
1659 		}
1660 		track_vma_bind(vma);
1661 
1662 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1663 	}
1664 
1665 out:
1666 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1667 		i915_gem_object_unpin_pages(obj);
1668 		i915_gem_object_put(obj);
1669 	}
1670 	return err;
1671 }
1672 
1673 int i915_gem_gtt_mock_selftests(void)
1674 {
1675 	static const struct i915_subtest tests[] = {
1676 		SUBTEST(igt_mock_drunk),
1677 		SUBTEST(igt_mock_walk),
1678 		SUBTEST(igt_mock_pot),
1679 		SUBTEST(igt_mock_fill),
1680 		SUBTEST(igt_gtt_reserve),
1681 		SUBTEST(igt_gtt_insert),
1682 	};
1683 	struct drm_i915_private *i915;
1684 	struct i915_ggtt *ggtt;
1685 	int err;
1686 
1687 	i915 = mock_gem_device();
1688 	if (!i915)
1689 		return -ENOMEM;
1690 
1691 	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1692 	if (!ggtt) {
1693 		err = -ENOMEM;
1694 		goto out_put;
1695 	}
1696 	mock_init_ggtt(i915, ggtt);
1697 
1698 	err = i915_subtests(tests, ggtt);
1699 
1700 	mock_device_flush(i915);
1701 	i915_gem_drain_freed_objects(i915);
1702 	mock_fini_ggtt(ggtt);
1703 	kfree(ggtt);
1704 out_put:
1705 	drm_dev_put(&i915->drm);
1706 	return err;
1707 }
1708 
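/*
 * Submit an empty request on the context and wait briefly for it to
 * complete, so that all previously submitted work has finished.
 */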
1709 static int context_sync(struct intel_context *ce)
1710 {
1711 	struct i915_request *rq;
1712 	long timeout;
1713 
1714 	rq = intel_context_create_request(ce);
1715 	if (IS_ERR(rq))
1716 		return PTR_ERR(rq);
1717 
1718 	i915_request_get(rq);
1719 	i915_request_add(rq);
1720 
1721 	timeout = i915_request_wait(rq, 0, HZ / 5);
1722 	i915_request_put(rq);
1723 
1724 	return timeout < 0 ? -EIO : 0;
1725 }
1726 
1727 static struct i915_request *
1728 submit_batch(struct intel_context *ce, u64 addr)
1729 {
1730 	struct i915_request *rq;
1731 	int err;
1732 
1733 	rq = intel_context_create_request(ce);
1734 	if (IS_ERR(rq))
1735 		return rq;
1736 
1737 	err = 0;
1738 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1739 		err = rq->engine->emit_init_breadcrumb(rq);
1740 	if (err == 0)
1741 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1742 
1743 	if (err == 0)
1744 		i915_request_get(rq);
1745 	i915_request_add(rq);
1746 
1747 	return err ? ERR_PTR(err) : rq;
1748 }
1749 
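/*
 * Each 64 byte batch written by igt_cs_tlb ends with a
 * MI_BATCH_BUFFER_START that chains back into itself, keeping it busy;
 * end_spin() overwrites the preceding MI_NOOP with MI_BATCH_BUFFER_END so
 * that the batch terminates instead.
 */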
1750 static u32 *spinner(u32 *batch, int i)
1751 {
1752 	return batch + i * 64 / sizeof(*batch) + 4;
1753 }
1754 
1755 static void end_spin(u32 *batch, int i)
1756 {
1757 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1758 	wmb();
1759 }
1760 
1761 static int igt_cs_tlb(void *arg)
1762 {
1763 	const unsigned int count = PAGE_SIZE / 64;
1764 	const unsigned int chunk_size = count * PAGE_SIZE;
1765 	struct drm_i915_private *i915 = arg;
1766 	struct drm_i915_gem_object *bbe, *act, *out;
1767 	struct i915_gem_engines_iter it;
1768 	struct i915_address_space *vm;
1769 	struct i915_gem_context *ctx;
1770 	struct intel_context *ce;
1771 	struct i915_vma *vma;
1772 	I915_RND_STATE(prng);
1773 	struct file *file;
1774 	unsigned int i;
1775 	u32 *result;
1776 	u32 *batch;
1777 	int err = 0;
1778 
1779 	/*
1780 	 * Our mission here is to fool the hardware into executing something
1781 	 * from scratch, as it has not seen the batch move (due to the missing
1782 	 * TLB invalidate).
1783 	 */
1784 
1785 	file = mock_file(i915);
1786 	if (IS_ERR(file))
1787 		return PTR_ERR(file);
1788 
1789 	ctx = live_context(i915, file);
1790 	if (IS_ERR(ctx)) {
1791 		err = PTR_ERR(ctx);
1792 		goto out_unlock;
1793 	}
1794 
1795 	vm = i915_gem_context_get_vm_rcu(ctx);
1796 	if (i915_is_ggtt(vm))
1797 		goto out_vm;
1798 
1799 	/* Create two pages; a dummy to prefill the TLB, and the intended target */
1800 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1801 	if (IS_ERR(bbe)) {
1802 		err = PTR_ERR(bbe);
1803 		goto out_vm;
1804 	}
1805 
1806 	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1807 	if (IS_ERR(batch)) {
1808 		err = PTR_ERR(batch);
1809 		goto out_put_bbe;
1810 	}
1811 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1812 	i915_gem_object_flush_map(bbe);
1813 	i915_gem_object_unpin_map(bbe);
1814 
1815 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1816 	if (IS_ERR(act)) {
1817 		err = PTR_ERR(act);
1818 		goto out_put_bbe;
1819 	}
1820 
1821 	/* Track the execution of each request by writing into a different slot */
1822 	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1823 	if (IS_ERR(batch)) {
1824 		err = PTR_ERR(batch);
1825 		goto out_put_act;
1826 	}
1827 	for (i = 0; i < count; i++) {
1828 		u32 *cs = batch + i * 64 / sizeof(*cs);
1829 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1830 
1831 		GEM_BUG_ON(INTEL_GEN(i915) < 6);
1832 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
1833 		if (INTEL_GEN(i915) >= 8) {
1834 			cs[1] = lower_32_bits(addr);
1835 			cs[2] = upper_32_bits(addr);
1836 			cs[3] = i;
1837 			cs[4] = MI_NOOP;
1838 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
1839 		} else {
1840 			cs[1] = 0;
1841 			cs[2] = lower_32_bits(addr);
1842 			cs[3] = i;
1843 			cs[4] = MI_NOOP;
1844 			cs[5] = MI_BATCH_BUFFER_START;
1845 		}
1846 	}
1847 
1848 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1849 	if (IS_ERR(out)) {
1850 		err = PTR_ERR(out);
1851 		goto out_put_batch;
1852 	}
1853 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1854 
1855 	vma = i915_vma_instance(out, vm, NULL);
1856 	if (IS_ERR(vma)) {
1857 		err = PTR_ERR(vma);
1858 		goto out_put_batch;
1859 	}
1860 
1861 	err = i915_vma_pin(vma, 0, 0,
1862 			   PIN_USER |
1863 			   PIN_OFFSET_FIXED |
1864 			   (vm->total - PAGE_SIZE));
1865 	if (err)
1866 		goto out_put_out;
1867 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1868 
1869 	result = i915_gem_object_pin_map(out, I915_MAP_WB);
1870 	if (IS_ERR(result)) {
1871 		err = PTR_ERR(result);
1872 		goto out_put_out;
1873 	}
1874 
1875 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1876 		IGT_TIMEOUT(end_time);
1877 		unsigned long pass = 0;
1878 
1879 		if (!intel_engine_can_store_dword(ce->engine))
1880 			continue;
1881 
1882 		while (!__igt_timeout(end_time, NULL)) {
1883 			struct i915_request *rq;
1884 			u64 offset;
1885 
1886 			offset = igt_random_offset(&prng,
1887 						   0, vm->total - PAGE_SIZE,
1888 						   chunk_size, PAGE_SIZE);
1889 
1890 			err = vm->allocate_va_range(vm, offset, chunk_size);
1891 			if (err)
1892 				goto end;
1893 
1894 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1895 
1896 			vma = i915_vma_instance(bbe, vm, NULL);
1897 			if (IS_ERR(vma)) {
1898 				err = PTR_ERR(vma);
1899 				goto end;
1900 			}
1901 
1902 			err = vma->ops->set_pages(vma);
1903 			if (err)
1904 				goto end;
1905 
1906 			/* Prime the TLB with the dummy pages */
1907 			for (i = 0; i < count; i++) {
1908 				vma->node.start = offset + i * PAGE_SIZE;
1909 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1910 
1911 				rq = submit_batch(ce, vma->node.start);
1912 				if (IS_ERR(rq)) {
1913 					err = PTR_ERR(rq);
1914 					goto end;
1915 				}
1916 				i915_request_put(rq);
1917 			}
1918 
1919 			vma->ops->clear_pages(vma);
1920 
1921 			err = context_sync(ce);
1922 			if (err) {
1923 				pr_err("%s: dummy setup timed out\n",
1924 				       ce->engine->name);
1925 				goto end;
1926 			}
1927 
1928 			vma = i915_vma_instance(act, vm, NULL);
1929 			if (IS_ERR(vma)) {
1930 				err = PTR_ERR(vma);
1931 				goto end;
1932 			}
1933 
1934 			err = vma->ops->set_pages(vma);
1935 			if (err)
1936 				goto end;
1937 
1938 			/* Replace the TLB with target batches */
1939 			for (i = 0; i < count; i++) {
1940 				struct i915_request *rq;
1941 				u32 *cs = batch + i * 64 / sizeof(*cs);
1942 				u64 addr;
1943 
1944 				vma->node.start = offset + i * PAGE_SIZE;
1945 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1946 
1947 				addr = vma->node.start + i * 64;
1948 				cs[4] = MI_NOOP;
1949 				cs[6] = lower_32_bits(addr);
1950 				cs[7] = upper_32_bits(addr);
1951 				wmb();
1952 
1953 				rq = submit_batch(ce, addr);
1954 				if (IS_ERR(rq)) {
1955 					err = PTR_ERR(rq);
1956 					goto end;
1957 				}
1958 
1959 				/* Wait until the context chain has started */
1960 				if (i == 0) {
1961 					while (READ_ONCE(result[i]) &&
1962 					       !i915_request_completed(rq))
1963 						cond_resched();
1964 				} else {
1965 					end_spin(batch, i - 1);
1966 				}
1967 
1968 				i915_request_put(rq);
1969 			}
1970 			end_spin(batch, count - 1);
1971 
1972 			vma->ops->clear_pages(vma);
1973 
1974 			err = context_sync(ce);
1975 			if (err) {
1976 				pr_err("%s: writes timed out\n",
1977 				       ce->engine->name);
1978 				goto end;
1979 			}
1980 
1981 			for (i = 0; i < count; i++) {
1982 				if (result[i] != i) {
1983 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
1984 					       ce->engine->name, pass,
1985 					       offset, i, result[i], i);
1986 					err = -EINVAL;
1987 					goto end;
1988 				}
1989 			}
1990 
1991 			vm->clear_range(vm, offset, chunk_size);
1992 			pass++;
1993 		}
1994 	}
1995 end:
1996 	if (igt_flush_test(i915))
1997 		err = -EIO;
1998 	i915_gem_context_unlock_engines(ctx);
1999 	i915_gem_object_unpin_map(out);
2000 out_put_out:
2001 	i915_gem_object_put(out);
2002 out_put_batch:
2003 	i915_gem_object_unpin_map(act);
2004 out_put_act:
2005 	i915_gem_object_put(act);
2006 out_put_bbe:
2007 	i915_gem_object_put(bbe);
2008 out_vm:
2009 	i915_vm_put(vm);
2010 out_unlock:
2011 	fput(file);
2012 	return err;
2013 }
2014 
2015 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2016 {
2017 	static const struct i915_subtest tests[] = {
2018 		SUBTEST(igt_ppgtt_alloc),
2019 		SUBTEST(igt_ppgtt_lowlevel),
2020 		SUBTEST(igt_ppgtt_drunk),
2021 		SUBTEST(igt_ppgtt_walk),
2022 		SUBTEST(igt_ppgtt_pot),
2023 		SUBTEST(igt_ppgtt_fill),
2024 		SUBTEST(igt_ppgtt_shrink),
2025 		SUBTEST(igt_ppgtt_shrink_boom),
2026 		SUBTEST(igt_ggtt_lowlevel),
2027 		SUBTEST(igt_ggtt_drunk),
2028 		SUBTEST(igt_ggtt_walk),
2029 		SUBTEST(igt_ggtt_pot),
2030 		SUBTEST(igt_ggtt_fill),
2031 		SUBTEST(igt_ggtt_page),
2032 		SUBTEST(igt_cs_tlb),
2033 	};
2034 
2035 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2036 
2037 	return i915_subtests(tests, i915);
2038 }
2039