1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 
32 #include "i915_random.h"
33 #include "i915_selftest.h"
34 
35 #include "mock_drm.h"
36 #include "mock_gem_device.h"
37 #include "mock_gtt.h"
38 #include "igt_flush_test.h"
39 
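/* Flush the delayed free worker so that freed objects really are released */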
40 static void cleanup_freed_objects(struct drm_i915_private *i915)
41 {
42 	i915_gem_drain_freed_objects(i915);
43 }
44 
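/*
 * Fake backing store: the scatterlist points every chunk at a constant,
 * biased pfn rather than at real memory. Only the GTT/DMA addresses are
 * ever used, so huge objects can be created without allocating any pages.
 */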
45 static void fake_free_pages(struct drm_i915_gem_object *obj,
46 			    struct sg_table *pages)
47 {
48 	sg_free_table(pages);
49 	kfree(pages);
50 }
51 
52 static int fake_get_pages(struct drm_i915_gem_object *obj)
53 {
54 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
55 #define PFN_BIAS 0x1000
56 	struct sg_table *pages;
57 	struct scatterlist *sg;
58 	unsigned int sg_page_sizes;
59 	typeof(obj->base.size) rem;
60 
61 	pages = kmalloc(sizeof(*pages), GFP);
62 	if (!pages)
63 		return -ENOMEM;
64 
65 	rem = round_up(obj->base.size, BIT(31)) >> 31;
66 	if (sg_alloc_table(pages, rem, GFP)) {
67 		kfree(pages);
68 		return -ENOMEM;
69 	}
70 
71 	sg_page_sizes = 0;
72 	rem = obj->base.size;
73 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
74 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
75 
76 		GEM_BUG_ON(!len);
77 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
78 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
79 		sg_dma_len(sg) = len;
80 		sg_page_sizes |= len;
81 
82 		rem -= len;
83 	}
84 	GEM_BUG_ON(rem);
85 
86 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
87 
88 	return 0;
89 #undef GFP
90 }
91 
92 static void fake_put_pages(struct drm_i915_gem_object *obj,
93 			   struct sg_table *pages)
94 {
95 	fake_free_pages(obj, pages);
96 	obj->mm.dirty = false;
97 }
98 
99 static const struct drm_i915_gem_object_ops fake_ops = {
100 	.name = "fake-gem",
101 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
102 	.get_pages = fake_get_pages,
103 	.put_pages = fake_put_pages,
104 };
105 
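/* Create an object of the requested size backed only by the fake sg table */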
106 static struct drm_i915_gem_object *
107 fake_dma_object(struct drm_i915_private *i915, u64 size)
108 {
109 	static struct lock_class_key lock_class;
110 	struct drm_i915_gem_object *obj;
111 
112 	GEM_BUG_ON(!size);
113 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
114 
115 	if (overflows_type(size, obj->base.size))
116 		return ERR_PTR(-E2BIG);
117 
118 	obj = i915_gem_object_alloc();
119 	if (!obj)
120 		goto err;
121 
122 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
123 	i915_gem_object_init(obj, &fake_ops, &lock_class);
124 
125 	i915_gem_object_set_volatile(obj);
126 
127 	obj->write_domain = I915_GEM_DOMAIN_CPU;
128 	obj->read_domains = I915_GEM_DOMAIN_CPU;
129 	obj->cache_level = I915_CACHE_NONE;
130 
131 	/* Preallocate the "backing storage" */
132 	if (i915_gem_object_pin_pages(obj))
133 		goto err_obj;
134 
135 	i915_gem_object_unpin_pages(obj);
136 	return obj;
137 
138 err_obj:
139 	i915_gem_object_put(obj);
140 err:
141 	return ERR_PTR(-ENOMEM);
142 }
143 
144 static int igt_ppgtt_alloc(void *arg)
145 {
146 	struct drm_i915_private *dev_priv = arg;
147 	struct i915_ppgtt *ppgtt;
148 	u64 size, last, limit;
149 	int err = 0;
150 
	/* Allocate a ppgtt and try to fill the entire range */
152 
153 	if (!HAS_PPGTT(dev_priv))
154 		return 0;
155 
156 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
157 	if (IS_ERR(ppgtt))
158 		return PTR_ERR(ppgtt);
159 
160 	if (!ppgtt->vm.allocate_va_range)
161 		goto err_ppgtt_cleanup;
162 
163 	/*
164 	 * While we only allocate the page tables here and so we could
165 	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the number of physical pages in the system.
167 	 * This should ensure that we do not run into the oomkiller during
168 	 * the test and take down the machine wilfully.
169 	 */
170 	limit = totalram_pages() << PAGE_SHIFT;
171 	limit = min(ppgtt->vm.total, limit);
172 
173 	/* Check we can allocate the entire range */
174 	for (size = 4096; size <= limit; size <<= 2) {
175 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
176 		if (err) {
177 			if (err == -ENOMEM) {
178 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
179 					size, ilog2(size));
180 				err = 0; /* virtual space too large! */
181 			}
182 			goto err_ppgtt_cleanup;
183 		}
184 
185 		cond_resched();
186 
187 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
188 	}
189 
190 	/* Check we can incrementally allocate the entire range */
191 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
192 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
193 						  last, size - last);
194 		if (err) {
195 			if (err == -ENOMEM) {
196 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
197 					last, size - last, ilog2(size));
198 				err = 0; /* virtual space too large! */
199 			}
200 			goto err_ppgtt_cleanup;
201 		}
202 
203 		cond_resched();
204 	}
205 
206 err_ppgtt_cleanup:
207 	i915_vm_put(&ppgtt->vm);
208 	return err;
209 }
210 
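/*
 * Exercise the low-level page-table hooks (allocate_va_range, insert_entries
 * and clear_range) directly through a mock vma, filling the hole with ever
 * larger power-of-two objects placed at randomly ordered offsets.
 */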
211 static int lowlevel_hole(struct i915_address_space *vm,
212 			 u64 hole_start, u64 hole_end,
213 			 unsigned long end_time)
214 {
215 	I915_RND_STATE(seed_prng);
216 	struct i915_vma *mock_vma;
217 	unsigned int size;
218 
219 	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
220 	if (!mock_vma)
221 		return -ENOMEM;
222 
223 	/* Keep creating larger objects until one cannot fit into the hole */
224 	for (size = 12; (hole_end - hole_start) >> size; size++) {
225 		I915_RND_SUBSTATE(prng, seed_prng);
226 		struct drm_i915_gem_object *obj;
227 		unsigned int *order, count, n;
228 		u64 hole_size;
229 
230 		hole_size = (hole_end - hole_start) >> size;
231 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
232 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
233 		count = hole_size >> 1;
234 		if (!count) {
235 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
236 				 __func__, hole_start, hole_end, size, hole_size);
237 			break;
238 		}
239 
240 		do {
241 			order = i915_random_order(count, &prng);
242 			if (order)
243 				break;
244 		} while (count >>= 1);
245 		if (!count) {
246 			kfree(mock_vma);
247 			return -ENOMEM;
248 		}
249 		GEM_BUG_ON(!order);
250 
251 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
252 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
253 
254 		/* Ignore allocation failures (i.e. don't report them as
255 		 * a test failure) as we are purposefully allocating very
256 		 * large objects without checking that we have sufficient
257 		 * memory. We expect to hit -ENOMEM.
258 		 */
259 
260 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
261 		if (IS_ERR(obj)) {
262 			kfree(order);
263 			break;
264 		}
265 
266 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
267 
268 		if (i915_gem_object_pin_pages(obj)) {
269 			i915_gem_object_put(obj);
270 			kfree(order);
271 			break;
272 		}
273 
274 		for (n = 0; n < count; n++) {
275 			u64 addr = hole_start + order[n] * BIT_ULL(size);
276 			intel_wakeref_t wakeref;
277 
278 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
279 
280 			if (igt_timeout(end_time,
281 					"%s timed out before %d/%d\n",
282 					__func__, n, count)) {
283 				hole_end = hole_start; /* quit */
284 				break;
285 			}
286 
287 			if (vm->allocate_va_range &&
288 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
289 				break;
290 
291 			mock_vma->pages = obj->mm.pages;
292 			mock_vma->node.size = BIT_ULL(size);
293 			mock_vma->node.start = addr;
294 
295 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
296 				vm->insert_entries(vm, mock_vma,
297 						   I915_CACHE_NONE, 0);
298 		}
299 		count = n;
300 
301 		i915_random_reorder(order, count, &prng);
302 		for (n = 0; n < count; n++) {
303 			u64 addr = hole_start + order[n] * BIT_ULL(size);
304 			intel_wakeref_t wakeref;
305 
306 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
307 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
308 				vm->clear_range(vm, addr, BIT_ULL(size));
309 		}
310 
311 		i915_gem_object_unpin_pages(obj);
312 		i915_gem_object_put(obj);
313 
314 		kfree(order);
315 
316 		cleanup_freed_objects(vm->i915);
317 	}
318 
319 	kfree(mock_vma);
320 	return 0;
321 }
322 
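/* Unbind each object's vma, if any, and drop our references to the objects */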
323 static void close_object_list(struct list_head *objects,
324 			      struct i915_address_space *vm)
325 {
326 	struct drm_i915_gem_object *obj, *on;
327 	int ignored;
328 
329 	list_for_each_entry_safe(obj, on, objects, st_link) {
330 		struct i915_vma *vma;
331 
332 		vma = i915_vma_instance(obj, vm, NULL);
333 		if (!IS_ERR(vma))
334 			ignored = i915_vma_unbind(vma);
335 
336 		list_del(&obj->st_link);
337 		i915_gem_object_put(obj);
338 	}
339 }
340 
341 static int fill_hole(struct i915_address_space *vm,
342 		     u64 hole_start, u64 hole_end,
343 		     unsigned long end_time)
344 {
345 	const u64 hole_size = hole_end - hole_start;
346 	struct drm_i915_gem_object *obj;
347 	const unsigned long max_pages =
348 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
349 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
350 	unsigned long npages, prime, flags;
351 	struct i915_vma *vma;
352 	LIST_HEAD(objects);
353 	int err;
354 
355 	/* Try binding many VMA working inwards from either edge */
356 
357 	flags = PIN_OFFSET_FIXED | PIN_USER;
358 	if (i915_is_ggtt(vm))
359 		flags |= PIN_GLOBAL;
360 
361 	for_each_prime_number_from(prime, 2, max_step) {
362 		for (npages = 1; npages <= max_pages; npages *= prime) {
363 			const u64 full_size = npages << PAGE_SHIFT;
364 			const struct {
365 				const char *name;
366 				u64 offset;
367 				int step;
368 			} phases[] = {
369 				{ "top-down", hole_end, -1, },
370 				{ "bottom-up", hole_start, 1, },
371 				{ }
372 			}, *p;
373 
374 			obj = fake_dma_object(vm->i915, full_size);
375 			if (IS_ERR(obj))
376 				break;
377 
378 			list_add(&obj->st_link, &objects);
379 
380 			/* Align differing sized objects against the edges, and
381 			 * check we don't walk off into the void when binding
382 			 * them into the GTT.
383 			 */
384 			for (p = phases; p->name; p++) {
385 				u64 offset;
386 
387 				offset = p->offset;
388 				list_for_each_entry(obj, &objects, st_link) {
389 					vma = i915_vma_instance(obj, vm, NULL);
390 					if (IS_ERR(vma))
391 						continue;
392 
393 					if (p->step < 0) {
394 						if (offset < hole_start + obj->base.size)
395 							break;
396 						offset -= obj->base.size;
397 					}
398 
399 					err = i915_vma_pin(vma, 0, 0, offset | flags);
400 					if (err) {
401 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
402 						       __func__, p->name, err, npages, prime, offset);
403 						goto err;
404 					}
405 
406 					if (!drm_mm_node_allocated(&vma->node) ||
407 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
408 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
409 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
410 						       offset);
411 						err = -EINVAL;
412 						goto err;
413 					}
414 
415 					i915_vma_unpin(vma);
416 
417 					if (p->step > 0) {
418 						if (offset + obj->base.size > hole_end)
419 							break;
420 						offset += obj->base.size;
421 					}
422 				}
423 
424 				offset = p->offset;
425 				list_for_each_entry(obj, &objects, st_link) {
426 					vma = i915_vma_instance(obj, vm, NULL);
427 					if (IS_ERR(vma))
428 						continue;
429 
430 					if (p->step < 0) {
431 						if (offset < hole_start + obj->base.size)
432 							break;
433 						offset -= obj->base.size;
434 					}
435 
436 					if (!drm_mm_node_allocated(&vma->node) ||
437 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
438 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
439 						       __func__, p->name, vma->node.start, vma->node.size,
440 						       offset);
441 						err = -EINVAL;
442 						goto err;
443 					}
444 
445 					err = i915_vma_unbind(vma);
446 					if (err) {
447 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
448 						       __func__, p->name, vma->node.start, vma->node.size,
449 						       err);
450 						goto err;
451 					}
452 
453 					if (p->step > 0) {
454 						if (offset + obj->base.size > hole_end)
455 							break;
456 						offset += obj->base.size;
457 					}
458 				}
459 
460 				offset = p->offset;
461 				list_for_each_entry_reverse(obj, &objects, st_link) {
462 					vma = i915_vma_instance(obj, vm, NULL);
463 					if (IS_ERR(vma))
464 						continue;
465 
466 					if (p->step < 0) {
467 						if (offset < hole_start + obj->base.size)
468 							break;
469 						offset -= obj->base.size;
470 					}
471 
472 					err = i915_vma_pin(vma, 0, 0, offset | flags);
473 					if (err) {
474 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
475 						       __func__, p->name, err, npages, prime, offset);
476 						goto err;
477 					}
478 
479 					if (!drm_mm_node_allocated(&vma->node) ||
480 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
481 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
482 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
483 						       offset);
484 						err = -EINVAL;
485 						goto err;
486 					}
487 
488 					i915_vma_unpin(vma);
489 
490 					if (p->step > 0) {
491 						if (offset + obj->base.size > hole_end)
492 							break;
493 						offset += obj->base.size;
494 					}
495 				}
496 
497 				offset = p->offset;
498 				list_for_each_entry_reverse(obj, &objects, st_link) {
499 					vma = i915_vma_instance(obj, vm, NULL);
500 					if (IS_ERR(vma))
501 						continue;
502 
503 					if (p->step < 0) {
504 						if (offset < hole_start + obj->base.size)
505 							break;
506 						offset -= obj->base.size;
507 					}
508 
509 					if (!drm_mm_node_allocated(&vma->node) ||
510 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
511 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
512 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
513 						       offset);
514 						err = -EINVAL;
515 						goto err;
516 					}
517 
518 					err = i915_vma_unbind(vma);
519 					if (err) {
520 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
521 						       __func__, p->name, vma->node.start, vma->node.size,
522 						       err);
523 						goto err;
524 					}
525 
526 					if (p->step > 0) {
527 						if (offset + obj->base.size > hole_end)
528 							break;
529 						offset += obj->base.size;
530 					}
531 				}
532 			}
533 
534 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
535 					__func__, npages, prime)) {
536 				err = -EINTR;
537 				goto err;
538 			}
539 		}
540 
541 		close_object_list(&objects, vm);
542 		cleanup_freed_objects(vm->i915);
543 	}
544 
545 	return 0;
546 
547 err:
548 	close_object_list(&objects, vm);
549 	return err;
550 }
551 
552 static int walk_hole(struct i915_address_space *vm,
553 		     u64 hole_start, u64 hole_end,
554 		     unsigned long end_time)
555 {
556 	const u64 hole_size = hole_end - hole_start;
557 	const unsigned long max_pages =
558 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
559 	unsigned long flags;
560 	u64 size;
561 
562 	/* Try binding a single VMA in different positions within the hole */
563 
564 	flags = PIN_OFFSET_FIXED | PIN_USER;
565 	if (i915_is_ggtt(vm))
566 		flags |= PIN_GLOBAL;
567 
568 	for_each_prime_number_from(size, 1, max_pages) {
569 		struct drm_i915_gem_object *obj;
570 		struct i915_vma *vma;
571 		u64 addr;
572 		int err = 0;
573 
574 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
575 		if (IS_ERR(obj))
576 			break;
577 
578 		vma = i915_vma_instance(obj, vm, NULL);
579 		if (IS_ERR(vma)) {
580 			err = PTR_ERR(vma);
581 			goto err_put;
582 		}
583 
584 		for (addr = hole_start;
585 		     addr + obj->base.size < hole_end;
586 		     addr += obj->base.size) {
587 			err = i915_vma_pin(vma, 0, 0, addr | flags);
588 			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
590 				       __func__, addr, vma->size,
591 				       hole_start, hole_end, err);
592 				goto err_put;
593 			}
594 			i915_vma_unpin(vma);
595 
596 			if (!drm_mm_node_allocated(&vma->node) ||
597 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
598 				pr_err("%s incorrect at %llx + %llx\n",
599 				       __func__, addr, vma->size);
600 				err = -EINVAL;
601 				goto err_put;
602 			}
603 
604 			err = i915_vma_unbind(vma);
605 			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
607 				       __func__, addr, vma->size, err);
608 				goto err_put;
609 			}
610 
611 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
612 
613 			if (igt_timeout(end_time,
614 					"%s timed out at %llx\n",
615 					__func__, addr)) {
616 				err = -EINTR;
617 				goto err_put;
618 			}
619 		}
620 
621 err_put:
622 		i915_gem_object_put(obj);
623 		if (err)
624 			return err;
625 
626 		cleanup_freed_objects(vm->i915);
627 	}
628 
629 	return 0;
630 }
631 
632 static int pot_hole(struct i915_address_space *vm,
633 		    u64 hole_start, u64 hole_end,
634 		    unsigned long end_time)
635 {
636 	struct drm_i915_gem_object *obj;
637 	struct i915_vma *vma;
638 	unsigned long flags;
639 	unsigned int pot;
640 	int err = 0;
641 
642 	flags = PIN_OFFSET_FIXED | PIN_USER;
643 	if (i915_is_ggtt(vm))
644 		flags |= PIN_GLOBAL;
645 
646 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
647 	if (IS_ERR(obj))
648 		return PTR_ERR(obj);
649 
650 	vma = i915_vma_instance(obj, vm, NULL);
651 	if (IS_ERR(vma)) {
652 		err = PTR_ERR(vma);
653 		goto err_obj;
654 	}
655 
656 	/* Insert a pair of pages across every pot boundary within the hole */
657 	for (pot = fls64(hole_end - 1) - 1;
658 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
659 	     pot--) {
660 		u64 step = BIT_ULL(pot);
661 		u64 addr;
662 
663 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
664 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
665 		     addr += step) {
666 			err = i915_vma_pin(vma, 0, 0, addr | flags);
667 			if (err) {
668 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
669 				       __func__,
670 				       addr,
671 				       hole_start, hole_end,
672 				       err);
673 				goto err_obj;
674 			}
675 
676 			if (!drm_mm_node_allocated(&vma->node) ||
677 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
678 				pr_err("%s incorrect at %llx + %llx\n",
679 				       __func__, addr, vma->size);
680 				i915_vma_unpin(vma);
681 				err = i915_vma_unbind(vma);
682 				err = -EINVAL;
683 				goto err_obj;
684 			}
685 
686 			i915_vma_unpin(vma);
687 			err = i915_vma_unbind(vma);
688 			GEM_BUG_ON(err);
689 		}
690 
691 		if (igt_timeout(end_time,
692 				"%s timed out after %d/%d\n",
693 				__func__, pot, fls64(hole_end - 1) - 1)) {
694 			err = -EINTR;
695 			goto err_obj;
696 		}
697 	}
698 
699 err_obj:
700 	i915_gem_object_put(obj);
701 	return err;
702 }
703 
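/*
 * Like lowlevel_hole, but bind through the normal vma API, pinning each
 * power-of-two sized object at randomly ordered offsets within the hole.
 */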
704 static int drunk_hole(struct i915_address_space *vm,
705 		      u64 hole_start, u64 hole_end,
706 		      unsigned long end_time)
707 {
708 	I915_RND_STATE(prng);
709 	unsigned int size;
710 	unsigned long flags;
711 
712 	flags = PIN_OFFSET_FIXED | PIN_USER;
713 	if (i915_is_ggtt(vm))
714 		flags |= PIN_GLOBAL;
715 
716 	/* Keep creating larger objects until one cannot fit into the hole */
717 	for (size = 12; (hole_end - hole_start) >> size; size++) {
718 		struct drm_i915_gem_object *obj;
719 		unsigned int *order, count, n;
720 		struct i915_vma *vma;
721 		u64 hole_size;
722 		int err = -ENODEV;
723 
724 		hole_size = (hole_end - hole_start) >> size;
725 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
726 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
727 		count = hole_size >> 1;
728 		if (!count) {
729 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
730 				 __func__, hole_start, hole_end, size, hole_size);
731 			break;
732 		}
733 
734 		do {
735 			order = i915_random_order(count, &prng);
736 			if (order)
737 				break;
738 		} while (count >>= 1);
739 		if (!count)
740 			return -ENOMEM;
741 		GEM_BUG_ON(!order);
742 
743 		/* Ignore allocation failures (i.e. don't report them as
744 		 * a test failure) as we are purposefully allocating very
745 		 * large objects without checking that we have sufficient
746 		 * memory. We expect to hit -ENOMEM.
747 		 */
748 
749 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
750 		if (IS_ERR(obj)) {
751 			kfree(order);
752 			break;
753 		}
754 
755 		vma = i915_vma_instance(obj, vm, NULL);
756 		if (IS_ERR(vma)) {
757 			err = PTR_ERR(vma);
758 			goto err_obj;
759 		}
760 
761 		GEM_BUG_ON(vma->size != BIT_ULL(size));
762 
763 		for (n = 0; n < count; n++) {
764 			u64 addr = hole_start + order[n] * BIT_ULL(size);
765 
766 			err = i915_vma_pin(vma, 0, 0, addr | flags);
767 			if (err) {
768 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
769 				       __func__,
770 				       addr, BIT_ULL(size),
771 				       hole_start, hole_end,
772 				       err);
773 				goto err_obj;
774 			}
775 
776 			if (!drm_mm_node_allocated(&vma->node) ||
777 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
778 				pr_err("%s incorrect at %llx + %llx\n",
779 				       __func__, addr, BIT_ULL(size));
780 				i915_vma_unpin(vma);
781 				err = i915_vma_unbind(vma);
782 				err = -EINVAL;
783 				goto err_obj;
784 			}
785 
786 			i915_vma_unpin(vma);
787 			err = i915_vma_unbind(vma);
788 			GEM_BUG_ON(err);
789 
790 			if (igt_timeout(end_time,
791 					"%s timed out after %d/%d\n",
792 					__func__, n, count)) {
793 				err = -EINTR;
794 				goto err_obj;
795 			}
796 		}
797 
798 err_obj:
799 		i915_gem_object_put(obj);
800 		kfree(order);
801 		if (err)
802 			return err;
803 
804 		cleanup_freed_objects(vm->i915);
805 	}
806 
807 	return 0;
808 }
809 
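/*
 * Fill the hole from start to end with progressively larger objects. Used
 * with fault injection (see shrink_hole) to exercise the shrinker and error
 * paths while the page tables are being allocated.
 */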
810 static int __shrink_hole(struct i915_address_space *vm,
811 			 u64 hole_start, u64 hole_end,
812 			 unsigned long end_time)
813 {
814 	struct drm_i915_gem_object *obj;
815 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
816 	unsigned int order = 12;
817 	LIST_HEAD(objects);
818 	int err = 0;
819 	u64 addr;
820 
821 	/* Keep creating larger objects until one cannot fit into the hole */
822 	for (addr = hole_start; addr < hole_end; ) {
823 		struct i915_vma *vma;
824 		u64 size = BIT_ULL(order++);
825 
826 		size = min(size, hole_end - addr);
827 		obj = fake_dma_object(vm->i915, size);
828 		if (IS_ERR(obj)) {
829 			err = PTR_ERR(obj);
830 			break;
831 		}
832 
833 		list_add(&obj->st_link, &objects);
834 
835 		vma = i915_vma_instance(obj, vm, NULL);
836 		if (IS_ERR(vma)) {
837 			err = PTR_ERR(vma);
838 			break;
839 		}
840 
841 		GEM_BUG_ON(vma->size != size);
842 
843 		err = i915_vma_pin(vma, 0, 0, addr | flags);
844 		if (err) {
845 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
846 			       __func__, addr, size, hole_start, hole_end, err);
847 			break;
848 		}
849 
850 		if (!drm_mm_node_allocated(&vma->node) ||
851 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
852 			pr_err("%s incorrect at %llx + %llx\n",
853 			       __func__, addr, size);
854 			i915_vma_unpin(vma);
855 			err = i915_vma_unbind(vma);
856 			err = -EINVAL;
857 			break;
858 		}
859 
860 		i915_vma_unpin(vma);
861 		addr += size;
862 
863 		/*
864 		 * Since we are injecting allocation faults at random intervals,
865 		 * wait for this allocation to complete before we change the
		 * fault injection.
867 		 */
868 		err = i915_vma_sync(vma);
869 		if (err)
870 			break;
871 
872 		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
874 				__func__, addr, hole_start, hole_end)) {
875 			err = -EINTR;
876 			break;
877 		}
878 	}
879 
880 	close_object_list(&objects, vm);
881 	cleanup_freed_objects(vm->i915);
882 	return err;
883 }
884 
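/* Rerun __shrink_hole with fault injection enabled at every prime interval */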
885 static int shrink_hole(struct i915_address_space *vm,
886 		       u64 hole_start, u64 hole_end,
887 		       unsigned long end_time)
888 {
889 	unsigned long prime;
890 	int err;
891 
892 	vm->fault_attr.probability = 999;
893 	atomic_set(&vm->fault_attr.times, -1);
894 
895 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
896 		vm->fault_attr.interval = prime;
897 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
898 		if (err)
899 			break;
900 	}
901 
902 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
903 
904 	return err;
905 }
906 
907 static int shrink_boom(struct i915_address_space *vm,
908 		       u64 hole_start, u64 hole_end,
909 		       unsigned long end_time)
910 {
911 	unsigned int sizes[] = { SZ_2M, SZ_1G };
912 	struct drm_i915_gem_object *purge;
913 	struct drm_i915_gem_object *explode;
914 	int err;
915 	int i;
916 
917 	/*
918 	 * Catch the case which shrink_hole seems to miss. The setup here
919 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all VMAs associated with the respective pd/pdp are
921 	 * unpinned at the time.
922 	 */
923 
924 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
925 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
926 		unsigned int size = sizes[i];
927 		struct i915_vma *vma;
928 
929 		purge = fake_dma_object(vm->i915, size);
930 		if (IS_ERR(purge))
931 			return PTR_ERR(purge);
932 
933 		vma = i915_vma_instance(purge, vm, NULL);
934 		if (IS_ERR(vma)) {
935 			err = PTR_ERR(vma);
936 			goto err_purge;
937 		}
938 
939 		err = i915_vma_pin(vma, 0, 0, flags);
940 		if (err)
941 			goto err_purge;
942 
943 		/* Should now be ripe for purging */
944 		i915_vma_unpin(vma);
945 
946 		explode = fake_dma_object(vm->i915, size);
947 		if (IS_ERR(explode)) {
948 			err = PTR_ERR(explode);
949 			goto err_purge;
950 		}
951 
952 		vm->fault_attr.probability = 100;
953 		vm->fault_attr.interval = 1;
954 		atomic_set(&vm->fault_attr.times, -1);
955 
956 		vma = i915_vma_instance(explode, vm, NULL);
957 		if (IS_ERR(vma)) {
958 			err = PTR_ERR(vma);
959 			goto err_explode;
960 		}
961 
962 		err = i915_vma_pin(vma, 0, 0, flags | size);
963 		if (err)
964 			goto err_explode;
965 
966 		i915_vma_unpin(vma);
967 
968 		i915_gem_object_put(purge);
969 		i915_gem_object_put(explode);
970 
971 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
972 		cleanup_freed_objects(vm->i915);
973 	}
974 
975 	return 0;
976 
977 err_explode:
978 	i915_gem_object_put(explode);
979 err_purge:
980 	i915_gem_object_put(purge);
981 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
982 	return err;
983 }
984 
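/* Create a full ppGTT and run the given hole exerciser over its entire range */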
985 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
986 			  int (*func)(struct i915_address_space *vm,
987 				      u64 hole_start, u64 hole_end,
988 				      unsigned long end_time))
989 {
990 	struct i915_ppgtt *ppgtt;
991 	IGT_TIMEOUT(end_time);
992 	struct file *file;
993 	int err;
994 
995 	if (!HAS_FULL_PPGTT(dev_priv))
996 		return 0;
997 
998 	file = mock_file(dev_priv);
999 	if (IS_ERR(file))
1000 		return PTR_ERR(file);
1001 
1002 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
1003 	if (IS_ERR(ppgtt)) {
1004 		err = PTR_ERR(ppgtt);
1005 		goto out_free;
1006 	}
1007 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1008 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1009 
1010 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1011 
1012 	i915_vm_put(&ppgtt->vm);
1013 
1014 out_free:
1015 	fput(file);
1016 	return err;
1017 }
1018 
1019 static int igt_ppgtt_fill(void *arg)
1020 {
1021 	return exercise_ppgtt(arg, fill_hole);
1022 }
1023 
1024 static int igt_ppgtt_walk(void *arg)
1025 {
1026 	return exercise_ppgtt(arg, walk_hole);
1027 }
1028 
1029 static int igt_ppgtt_pot(void *arg)
1030 {
1031 	return exercise_ppgtt(arg, pot_hole);
1032 }
1033 
1034 static int igt_ppgtt_drunk(void *arg)
1035 {
1036 	return exercise_ppgtt(arg, drunk_hole);
1037 }
1038 
1039 static int igt_ppgtt_lowlevel(void *arg)
1040 {
1041 	return exercise_ppgtt(arg, lowlevel_hole);
1042 }
1043 
1044 static int igt_ppgtt_shrink(void *arg)
1045 {
1046 	return exercise_ppgtt(arg, shrink_hole);
1047 }
1048 
1049 static int igt_ppgtt_shrink_boom(void *arg)
1050 {
1051 	return exercise_ppgtt(arg, shrink_boom);
1052 }
1053 
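/* list_sort() comparator: order drm_mm holes by ascending start address */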
1054 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1055 {
1056 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1057 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1058 
1059 	if (a->start < b->start)
1060 		return -1;
1061 	else
1062 		return 1;
1063 }
1064 
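/*
 * Run the exerciser over every hole in the GGTT, in address order, restarting
 * the walk after each hole as the exerciser may have modified the drm_mm.
 */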
1065 static int exercise_ggtt(struct drm_i915_private *i915,
1066 			 int (*func)(struct i915_address_space *vm,
1067 				     u64 hole_start, u64 hole_end,
1068 				     unsigned long end_time))
1069 {
1070 	struct i915_ggtt *ggtt = &i915->ggtt;
1071 	u64 hole_start, hole_end, last = 0;
1072 	struct drm_mm_node *node;
1073 	IGT_TIMEOUT(end_time);
1074 	int err = 0;
1075 
1076 restart:
1077 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1078 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1079 		if (hole_start < last)
1080 			continue;
1081 
1082 		if (ggtt->vm.mm.color_adjust)
1083 			ggtt->vm.mm.color_adjust(node, 0,
1084 						 &hole_start, &hole_end);
1085 		if (hole_start >= hole_end)
1086 			continue;
1087 
1088 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1089 		if (err)
1090 			break;
1091 
1092 		/* As we have manipulated the drm_mm, the list may be corrupt */
1093 		last = hole_end;
1094 		goto restart;
1095 	}
1096 
1097 	return err;
1098 }
1099 
1100 static int igt_ggtt_fill(void *arg)
1101 {
1102 	return exercise_ggtt(arg, fill_hole);
1103 }
1104 
1105 static int igt_ggtt_walk(void *arg)
1106 {
1107 	return exercise_ggtt(arg, walk_hole);
1108 }
1109 
1110 static int igt_ggtt_pot(void *arg)
1111 {
1112 	return exercise_ggtt(arg, pot_hole);
1113 }
1114 
1115 static int igt_ggtt_drunk(void *arg)
1116 {
1117 	return exercise_ggtt(arg, drunk_hole);
1118 }
1119 
1120 static int igt_ggtt_lowlevel(void *arg)
1121 {
1122 	return exercise_ggtt(arg, lowlevel_hole);
1123 }
1124 
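/*
 * Check ggtt->vm.insert_page() by pointing a range of GGTT pages at the same
 * object page, writing a distinct value through the aperture at each offset
 * and then reading the values back in a different random order.
 */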
1125 static int igt_ggtt_page(void *arg)
1126 {
1127 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1128 	I915_RND_STATE(prng);
1129 	struct drm_i915_private *i915 = arg;
1130 	struct i915_ggtt *ggtt = &i915->ggtt;
1131 	struct drm_i915_gem_object *obj;
1132 	intel_wakeref_t wakeref;
1133 	struct drm_mm_node tmp;
1134 	unsigned int *order, n;
1135 	int err;
1136 
1137 	if (!i915_ggtt_has_aperture(ggtt))
1138 		return 0;
1139 
1140 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1141 	if (IS_ERR(obj))
1142 		return PTR_ERR(obj);
1143 
1144 	err = i915_gem_object_pin_pages(obj);
1145 	if (err)
1146 		goto out_free;
1147 
1148 	memset(&tmp, 0, sizeof(tmp));
1149 	mutex_lock(&ggtt->vm.mutex);
1150 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1151 					  count * PAGE_SIZE, 0,
1152 					  I915_COLOR_UNEVICTABLE,
1153 					  0, ggtt->mappable_end,
1154 					  DRM_MM_INSERT_LOW);
1155 	mutex_unlock(&ggtt->vm.mutex);
1156 	if (err)
1157 		goto out_unpin;
1158 
1159 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1160 
1161 	for (n = 0; n < count; n++) {
1162 		u64 offset = tmp.start + n * PAGE_SIZE;
1163 
1164 		ggtt->vm.insert_page(&ggtt->vm,
1165 				     i915_gem_object_get_dma_address(obj, 0),
1166 				     offset, I915_CACHE_NONE, 0);
1167 	}
1168 
1169 	order = i915_random_order(count, &prng);
1170 	if (!order) {
1171 		err = -ENOMEM;
1172 		goto out_remove;
1173 	}
1174 
1175 	for (n = 0; n < count; n++) {
1176 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1177 		u32 __iomem *vaddr;
1178 
1179 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1180 		iowrite32(n, vaddr + n);
1181 		io_mapping_unmap_atomic(vaddr);
1182 	}
1183 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1184 
1185 	i915_random_reorder(order, count, &prng);
1186 	for (n = 0; n < count; n++) {
1187 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1188 		u32 __iomem *vaddr;
1189 		u32 val;
1190 
1191 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1192 		val = ioread32(vaddr + n);
1193 		io_mapping_unmap_atomic(vaddr);
1194 
1195 		if (val != n) {
1196 			pr_err("insert page failed: found %d, expected %d\n",
1197 			       val, n);
1198 			err = -EINVAL;
1199 			break;
1200 		}
1201 	}
1202 
1203 	kfree(order);
1204 out_remove:
1205 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1206 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1207 	mutex_lock(&ggtt->vm.mutex);
1208 	drm_mm_remove_node(&tmp);
1209 	mutex_unlock(&ggtt->vm.mutex);
1210 out_unpin:
1211 	i915_gem_object_unpin_pages(obj);
1212 out_free:
1213 	i915_gem_object_put(obj);
1214 	return err;
1215 }
1216 
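/*
 * Mimic the bookkeeping of a real bind for a mock vma: pin the object's
 * pages, point vma->pages at them and put the vma on the vm's bound list.
 */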
1217 static void track_vma_bind(struct i915_vma *vma)
1218 {
1219 	struct drm_i915_gem_object *obj = vma->obj;
1220 
1221 	__i915_gem_object_pin_pages(obj);
1222 
1223 	GEM_BUG_ON(vma->pages);
1224 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1225 	__i915_gem_object_pin_pages(obj);
1226 	vma->pages = obj->mm.pages;
1227 
1228 	mutex_lock(&vma->vm->mutex);
1229 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1230 	mutex_unlock(&vma->vm->mutex);
1231 }
1232 
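/* Run the exerciser over a mock context's address space, capped by system RAM */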
1233 static int exercise_mock(struct drm_i915_private *i915,
1234 			 int (*func)(struct i915_address_space *vm,
1235 				     u64 hole_start, u64 hole_end,
1236 				     unsigned long end_time))
1237 {
1238 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1239 	struct i915_address_space *vm;
1240 	struct i915_gem_context *ctx;
1241 	IGT_TIMEOUT(end_time);
1242 	int err;
1243 
1244 	ctx = mock_context(i915, "mock");
1245 	if (!ctx)
1246 		return -ENOMEM;
1247 
1248 	vm = i915_gem_context_get_vm_rcu(ctx);
1249 	err = func(vm, 0, min(vm->total, limit), end_time);
1250 	i915_vm_put(vm);
1251 
1252 	mock_context_close(ctx);
1253 	return err;
1254 }
1255 
1256 static int igt_mock_fill(void *arg)
1257 {
1258 	struct i915_ggtt *ggtt = arg;
1259 
1260 	return exercise_mock(ggtt->vm.i915, fill_hole);
1261 }
1262 
1263 static int igt_mock_walk(void *arg)
1264 {
1265 	struct i915_ggtt *ggtt = arg;
1266 
1267 	return exercise_mock(ggtt->vm.i915, walk_hole);
1268 }
1269 
1270 static int igt_mock_pot(void *arg)
1271 {
1272 	struct i915_ggtt *ggtt = arg;
1273 
1274 	return exercise_mock(ggtt->vm.i915, pot_hole);
1275 }
1276 
1277 static int igt_mock_drunk(void *arg)
1278 {
1279 	struct i915_ggtt *ggtt = arg;
1280 
1281 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1282 }
1283 
1284 static int igt_gtt_reserve(void *arg)
1285 {
1286 	struct i915_ggtt *ggtt = arg;
1287 	struct drm_i915_gem_object *obj, *on;
1288 	I915_RND_STATE(prng);
1289 	LIST_HEAD(objects);
1290 	u64 total;
1291 	int err = -ENODEV;
1292 
1293 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1294 	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
1296 	 */
1297 
1298 	/* Start by filling the GGTT */
1299 	for (total = 0;
1300 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1301 	     total += 2 * I915_GTT_PAGE_SIZE) {
1302 		struct i915_vma *vma;
1303 
1304 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1305 						      2 * PAGE_SIZE);
1306 		if (IS_ERR(obj)) {
1307 			err = PTR_ERR(obj);
1308 			goto out;
1309 		}
1310 
1311 		err = i915_gem_object_pin_pages(obj);
1312 		if (err) {
1313 			i915_gem_object_put(obj);
1314 			goto out;
1315 		}
1316 
1317 		list_add(&obj->st_link, &objects);
1318 
1319 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1320 		if (IS_ERR(vma)) {
1321 			err = PTR_ERR(vma);
1322 			goto out;
1323 		}
1324 
1325 		mutex_lock(&ggtt->vm.mutex);
1326 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1327 					   obj->base.size,
1328 					   total,
1329 					   obj->cache_level,
1330 					   0);
1331 		mutex_unlock(&ggtt->vm.mutex);
1332 		if (err) {
1333 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1334 			       total, ggtt->vm.total, err);
1335 			goto out;
1336 		}
1337 		track_vma_bind(vma);
1338 
1339 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1340 		if (vma->node.start != total ||
1341 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1342 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1343 			       vma->node.start, vma->node.size,
1344 			       total, 2*I915_GTT_PAGE_SIZE);
1345 			err = -EINVAL;
1346 			goto out;
1347 		}
1348 	}
1349 
1350 	/* Now we start forcing evictions */
1351 	for (total = I915_GTT_PAGE_SIZE;
1352 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1353 	     total += 2 * I915_GTT_PAGE_SIZE) {
1354 		struct i915_vma *vma;
1355 
1356 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1357 						      2 * PAGE_SIZE);
1358 		if (IS_ERR(obj)) {
1359 			err = PTR_ERR(obj);
1360 			goto out;
1361 		}
1362 
1363 		err = i915_gem_object_pin_pages(obj);
1364 		if (err) {
1365 			i915_gem_object_put(obj);
1366 			goto out;
1367 		}
1368 
1369 		list_add(&obj->st_link, &objects);
1370 
1371 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1372 		if (IS_ERR(vma)) {
1373 			err = PTR_ERR(vma);
1374 			goto out;
1375 		}
1376 
1377 		mutex_lock(&ggtt->vm.mutex);
1378 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1379 					   obj->base.size,
1380 					   total,
1381 					   obj->cache_level,
1382 					   0);
1383 		mutex_unlock(&ggtt->vm.mutex);
1384 		if (err) {
1385 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1386 			       total, ggtt->vm.total, err);
1387 			goto out;
1388 		}
1389 		track_vma_bind(vma);
1390 
1391 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1392 		if (vma->node.start != total ||
1393 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1394 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1395 			       vma->node.start, vma->node.size,
1396 			       total, 2*I915_GTT_PAGE_SIZE);
1397 			err = -EINVAL;
1398 			goto out;
1399 		}
1400 	}
1401 
1402 	/* And then try at random */
1403 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1404 		struct i915_vma *vma;
1405 		u64 offset;
1406 
1407 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1408 		if (IS_ERR(vma)) {
1409 			err = PTR_ERR(vma);
1410 			goto out;
1411 		}
1412 
1413 		err = i915_vma_unbind(vma);
1414 		if (err) {
1415 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1416 			goto out;
1417 		}
1418 
1419 		offset = igt_random_offset(&prng,
1420 					   0, ggtt->vm.total,
1421 					   2 * I915_GTT_PAGE_SIZE,
1422 					   I915_GTT_MIN_ALIGNMENT);
1423 
1424 		mutex_lock(&ggtt->vm.mutex);
1425 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1426 					   obj->base.size,
1427 					   offset,
1428 					   obj->cache_level,
1429 					   0);
1430 		mutex_unlock(&ggtt->vm.mutex);
1431 		if (err) {
1432 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1433 			       total, ggtt->vm.total, err);
1434 			goto out;
1435 		}
1436 		track_vma_bind(vma);
1437 
1438 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1439 		if (vma->node.start != offset ||
1440 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1441 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1442 			       vma->node.start, vma->node.size,
1443 			       offset, 2*I915_GTT_PAGE_SIZE);
1444 			err = -EINVAL;
1445 			goto out;
1446 		}
1447 	}
1448 
1449 out:
1450 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1451 		i915_gem_object_unpin_pages(obj);
1452 		i915_gem_object_put(obj);
1453 	}
1454 	return err;
1455 }
1456 
1457 static int igt_gtt_insert(void *arg)
1458 {
1459 	struct i915_ggtt *ggtt = arg;
1460 	struct drm_i915_gem_object *obj, *on;
1461 	struct drm_mm_node tmp = {};
1462 	const struct invalid_insert {
1463 		u64 size;
1464 		u64 alignment;
1465 		u64 start, end;
1466 	} invalid_insert[] = {
1467 		{
1468 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1469 			0, ggtt->vm.total,
1470 		},
1471 		{
1472 			2*I915_GTT_PAGE_SIZE, 0,
1473 			0, I915_GTT_PAGE_SIZE,
1474 		},
1475 		{
1476 			-(u64)I915_GTT_PAGE_SIZE, 0,
1477 			0, 4*I915_GTT_PAGE_SIZE,
1478 		},
1479 		{
1480 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1481 			0, 4*I915_GTT_PAGE_SIZE,
1482 		},
1483 		{
1484 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1485 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1486 		},
1487 		{}
1488 	}, *ii;
1489 	LIST_HEAD(objects);
1490 	u64 total;
1491 	int err = -ENODEV;
1492 
1493 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
1495 	 */
1496 
1497 	/* Check a couple of obviously invalid requests */
1498 	for (ii = invalid_insert; ii->size; ii++) {
1499 		mutex_lock(&ggtt->vm.mutex);
1500 		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1501 					  ii->size, ii->alignment,
1502 					  I915_COLOR_UNEVICTABLE,
1503 					  ii->start, ii->end,
1504 					  0);
1505 		mutex_unlock(&ggtt->vm.mutex);
1506 		if (err != -ENOSPC) {
1507 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1508 			       ii->size, ii->alignment, ii->start, ii->end,
1509 			       err);
1510 			return -EINVAL;
1511 		}
1512 	}
1513 
1514 	/* Start by filling the GGTT */
1515 	for (total = 0;
1516 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1517 	     total += I915_GTT_PAGE_SIZE) {
1518 		struct i915_vma *vma;
1519 
1520 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1521 						      I915_GTT_PAGE_SIZE);
1522 		if (IS_ERR(obj)) {
1523 			err = PTR_ERR(obj);
1524 			goto out;
1525 		}
1526 
1527 		err = i915_gem_object_pin_pages(obj);
1528 		if (err) {
1529 			i915_gem_object_put(obj);
1530 			goto out;
1531 		}
1532 
1533 		list_add(&obj->st_link, &objects);
1534 
1535 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1536 		if (IS_ERR(vma)) {
1537 			err = PTR_ERR(vma);
1538 			goto out;
1539 		}
1540 
1541 		mutex_lock(&ggtt->vm.mutex);
1542 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1543 					  obj->base.size, 0, obj->cache_level,
1544 					  0, ggtt->vm.total,
1545 					  0);
1546 		mutex_unlock(&ggtt->vm.mutex);
1547 		if (err == -ENOSPC) {
1548 			/* maxed out the GGTT space */
1549 			i915_gem_object_put(obj);
1550 			break;
1551 		}
1552 		if (err) {
1553 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1554 			       total, ggtt->vm.total, err);
1555 			goto out;
1556 		}
1557 		track_vma_bind(vma);
1558 		__i915_vma_pin(vma);
1559 
1560 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1561 	}
1562 
1563 	list_for_each_entry(obj, &objects, st_link) {
1564 		struct i915_vma *vma;
1565 
1566 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1567 		if (IS_ERR(vma)) {
1568 			err = PTR_ERR(vma);
1569 			goto out;
1570 		}
1571 
1572 		if (!drm_mm_node_allocated(&vma->node)) {
1573 			pr_err("VMA was unexpectedly evicted!\n");
1574 			err = -EINVAL;
1575 			goto out;
1576 		}
1577 
1578 		__i915_vma_unpin(vma);
1579 	}
1580 
1581 	/* If we then reinsert, we should find the same hole */
1582 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1583 		struct i915_vma *vma;
1584 		u64 offset;
1585 
1586 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1587 		if (IS_ERR(vma)) {
1588 			err = PTR_ERR(vma);
1589 			goto out;
1590 		}
1591 
1592 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1593 		offset = vma->node.start;
1594 
1595 		err = i915_vma_unbind(vma);
1596 		if (err) {
1597 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1598 			goto out;
1599 		}
1600 
1601 		mutex_lock(&ggtt->vm.mutex);
1602 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1603 					  obj->base.size, 0, obj->cache_level,
1604 					  0, ggtt->vm.total,
1605 					  0);
1606 		mutex_unlock(&ggtt->vm.mutex);
1607 		if (err) {
1608 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1609 			       total, ggtt->vm.total, err);
1610 			goto out;
1611 		}
1612 		track_vma_bind(vma);
1613 
1614 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1615 		if (vma->node.start != offset) {
1616 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1617 			       offset, vma->node.start);
1618 			err = -EINVAL;
1619 			goto out;
1620 		}
1621 	}
1622 
1623 	/* And then force evictions */
1624 	for (total = 0;
1625 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1626 	     total += 2 * I915_GTT_PAGE_SIZE) {
1627 		struct i915_vma *vma;
1628 
1629 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1630 						      2 * I915_GTT_PAGE_SIZE);
1631 		if (IS_ERR(obj)) {
1632 			err = PTR_ERR(obj);
1633 			goto out;
1634 		}
1635 
1636 		err = i915_gem_object_pin_pages(obj);
1637 		if (err) {
1638 			i915_gem_object_put(obj);
1639 			goto out;
1640 		}
1641 
1642 		list_add(&obj->st_link, &objects);
1643 
1644 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1645 		if (IS_ERR(vma)) {
1646 			err = PTR_ERR(vma);
1647 			goto out;
1648 		}
1649 
1650 		mutex_lock(&ggtt->vm.mutex);
1651 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1652 					  obj->base.size, 0, obj->cache_level,
1653 					  0, ggtt->vm.total,
1654 					  0);
1655 		mutex_unlock(&ggtt->vm.mutex);
1656 		if (err) {
1657 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1658 			       total, ggtt->vm.total, err);
1659 			goto out;
1660 		}
1661 		track_vma_bind(vma);
1662 
1663 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1664 	}
1665 
1666 out:
1667 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1668 		i915_gem_object_unpin_pages(obj);
1669 		i915_gem_object_put(obj);
1670 	}
1671 	return err;
1672 }
1673 
1674 int i915_gem_gtt_mock_selftests(void)
1675 {
1676 	static const struct i915_subtest tests[] = {
1677 		SUBTEST(igt_mock_drunk),
1678 		SUBTEST(igt_mock_walk),
1679 		SUBTEST(igt_mock_pot),
1680 		SUBTEST(igt_mock_fill),
1681 		SUBTEST(igt_gtt_reserve),
1682 		SUBTEST(igt_gtt_insert),
1683 	};
1684 	struct drm_i915_private *i915;
1685 	struct i915_ggtt *ggtt;
1686 	int err;
1687 
1688 	i915 = mock_gem_device();
1689 	if (!i915)
1690 		return -ENOMEM;
1691 
1692 	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1693 	if (!ggtt) {
1694 		err = -ENOMEM;
1695 		goto out_put;
1696 	}
1697 	mock_init_ggtt(i915, ggtt);
1698 
1699 	err = i915_subtests(tests, ggtt);
1700 
1701 	mock_device_flush(i915);
1702 	i915_gem_drain_freed_objects(i915);
1703 	mock_fini_ggtt(ggtt);
1704 	kfree(ggtt);
1705 out_put:
1706 	drm_dev_put(&i915->drm);
1707 	return err;
1708 }
1709 
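/* Submit an empty request on the context and wait for it; -EIO on timeout */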
1710 static int context_sync(struct intel_context *ce)
1711 {
1712 	struct i915_request *rq;
1713 	long timeout;
1714 
1715 	rq = intel_context_create_request(ce);
1716 	if (IS_ERR(rq))
1717 		return PTR_ERR(rq);
1718 
1719 	i915_request_get(rq);
1720 	i915_request_add(rq);
1721 
1722 	timeout = i915_request_wait(rq, 0, HZ / 5);
1723 	i915_request_put(rq);
1724 
1725 	return timeout < 0 ? -EIO : 0;
1726 }
1727 
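/*
 * Submit a batch starting at the given GTT address, preceded by an initial
 * breadcrumb where available so that a hang in the batch can be detected.
 */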
1728 static struct i915_request *
1729 submit_batch(struct intel_context *ce, u64 addr)
1730 {
1731 	struct i915_request *rq;
1732 	int err;
1733 
1734 	rq = intel_context_create_request(ce);
1735 	if (IS_ERR(rq))
1736 		return rq;
1737 
1738 	err = 0;
1739 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1740 		err = rq->engine->emit_init_breadcrumb(rq);
1741 	if (err == 0)
1742 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1743 
1744 	if (err == 0)
1745 		i915_request_get(rq);
1746 	i915_request_add(rq);
1747 
1748 	return err ? ERR_PTR(err) : rq;
1749 }
1750 
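/*
 * Each 64-byte slot of the 'act' batch stores its index and then jumps to the
 * next batch via MI_BATCH_BUFFER_START; spinner() locates the dword just
 * before that jump and end_spin() flips it to MI_BATCH_BUFFER_END to break
 * the chain.
 */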
1751 static u32 *spinner(u32 *batch, int i)
1752 {
1753 	return batch + i * 64 / sizeof(*batch) + 4;
1754 }
1755 
1756 static void end_spin(u32 *batch, int i)
1757 {
1758 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1759 	wmb();
1760 }
1761 
1762 static int igt_cs_tlb(void *arg)
1763 {
1764 	const unsigned int count = PAGE_SIZE / 64;
1765 	const unsigned int chunk_size = count * PAGE_SIZE;
1766 	struct drm_i915_private *i915 = arg;
1767 	struct drm_i915_gem_object *bbe, *act, *out;
1768 	struct i915_gem_engines_iter it;
1769 	struct i915_address_space *vm;
1770 	struct i915_gem_context *ctx;
1771 	struct intel_context *ce;
1772 	struct i915_vma *vma;
1773 	I915_RND_STATE(prng);
1774 	struct file *file;
1775 	unsigned int i;
1776 	u32 *result;
1777 	u32 *batch;
1778 	int err = 0;
1779 
1780 	/*
	 * Our mission here is to fool the hardware into executing something
1782 	 * from scratch as it has not seen the batch move (due to missing
1783 	 * the TLB invalidate).
1784 	 */
1785 
1786 	file = mock_file(i915);
1787 	if (IS_ERR(file))
1788 		return PTR_ERR(file);
1789 
1790 	ctx = live_context(i915, file);
1791 	if (IS_ERR(ctx)) {
1792 		err = PTR_ERR(ctx);
1793 		goto out_unlock;
1794 	}
1795 
1796 	vm = i915_gem_context_get_vm_rcu(ctx);
1797 	if (i915_is_ggtt(vm))
1798 		goto out_vm;
1799 
	/* Create two pages; a dummy to prefill the TLB, and the intended target */
1801 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1802 	if (IS_ERR(bbe)) {
1803 		err = PTR_ERR(bbe);
1804 		goto out_vm;
1805 	}
1806 
1807 	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1808 	if (IS_ERR(batch)) {
1809 		err = PTR_ERR(batch);
1810 		goto out_put_bbe;
1811 	}
1812 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1813 	i915_gem_object_flush_map(bbe);
1814 	i915_gem_object_unpin_map(bbe);
1815 
1816 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1817 	if (IS_ERR(act)) {
1818 		err = PTR_ERR(act);
1819 		goto out_put_bbe;
1820 	}
1821 
	/* Track the execution of each request by writing into a different slot */
1823 	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1824 	if (IS_ERR(batch)) {
1825 		err = PTR_ERR(batch);
1826 		goto out_put_act;
1827 	}
1828 	for (i = 0; i < count; i++) {
1829 		u32 *cs = batch + i * 64 / sizeof(*cs);
1830 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1831 
1832 		GEM_BUG_ON(INTEL_GEN(i915) < 6);
1833 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
1834 		if (INTEL_GEN(i915) >= 8) {
1835 			cs[1] = lower_32_bits(addr);
1836 			cs[2] = upper_32_bits(addr);
1837 			cs[3] = i;
1838 			cs[4] = MI_NOOP;
1839 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
1840 		} else {
1841 			cs[1] = 0;
1842 			cs[2] = lower_32_bits(addr);
1843 			cs[3] = i;
1844 			cs[4] = MI_NOOP;
1845 			cs[5] = MI_BATCH_BUFFER_START;
1846 		}
1847 	}
1848 
1849 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1850 	if (IS_ERR(out)) {
1851 		err = PTR_ERR(out);
1852 		goto out_put_batch;
1853 	}
1854 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1855 
1856 	vma = i915_vma_instance(out, vm, NULL);
1857 	if (IS_ERR(vma)) {
1858 		err = PTR_ERR(vma);
1859 		goto out_put_batch;
1860 	}
1861 
1862 	err = i915_vma_pin(vma, 0, 0,
1863 			   PIN_USER |
1864 			   PIN_OFFSET_FIXED |
1865 			   (vm->total - PAGE_SIZE));
1866 	if (err)
1867 		goto out_put_out;
1868 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1869 
1870 	result = i915_gem_object_pin_map(out, I915_MAP_WB);
1871 	if (IS_ERR(result)) {
1872 		err = PTR_ERR(result);
1873 		goto out_put_out;
1874 	}
1875 
1876 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1877 		IGT_TIMEOUT(end_time);
1878 		unsigned long pass = 0;
1879 
1880 		if (!intel_engine_can_store_dword(ce->engine))
1881 			continue;
1882 
1883 		while (!__igt_timeout(end_time, NULL)) {
1884 			struct i915_request *rq;
1885 			u64 offset;
1886 
1887 			offset = igt_random_offset(&prng,
1888 						   0, vm->total - PAGE_SIZE,
1889 						   chunk_size, PAGE_SIZE);
1890 
1891 			err = vm->allocate_va_range(vm, offset, chunk_size);
1892 			if (err)
1893 				goto end;
1894 
1895 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1896 
1897 			vma = i915_vma_instance(bbe, vm, NULL);
1898 			if (IS_ERR(vma)) {
1899 				err = PTR_ERR(vma);
1900 				goto end;
1901 			}
1902 
1903 			err = vma->ops->set_pages(vma);
1904 			if (err)
1905 				goto end;
1906 
1907 			/* Prime the TLB with the dummy pages */
1908 			for (i = 0; i < count; i++) {
1909 				vma->node.start = offset + i * PAGE_SIZE;
1910 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1911 
1912 				rq = submit_batch(ce, vma->node.start);
1913 				if (IS_ERR(rq)) {
1914 					err = PTR_ERR(rq);
1915 					goto end;
1916 				}
1917 				i915_request_put(rq);
1918 			}
1919 
1920 			vma->ops->clear_pages(vma);
1921 
1922 			err = context_sync(ce);
1923 			if (err) {
1924 				pr_err("%s: dummy setup timed out\n",
1925 				       ce->engine->name);
1926 				goto end;
1927 			}
1928 
1929 			vma = i915_vma_instance(act, vm, NULL);
1930 			if (IS_ERR(vma)) {
1931 				err = PTR_ERR(vma);
1932 				goto end;
1933 			}
1934 
1935 			err = vma->ops->set_pages(vma);
1936 			if (err)
1937 				goto end;
1938 
1939 			/* Replace the TLB with target batches */
1940 			for (i = 0; i < count; i++) {
1941 				struct i915_request *rq;
1942 				u32 *cs = batch + i * 64 / sizeof(*cs);
1943 				u64 addr;
1944 
1945 				vma->node.start = offset + i * PAGE_SIZE;
1946 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1947 
1948 				addr = vma->node.start + i * 64;
1949 				cs[4] = MI_NOOP;
1950 				cs[6] = lower_32_bits(addr);
1951 				cs[7] = upper_32_bits(addr);
1952 				wmb();
1953 
1954 				rq = submit_batch(ce, addr);
1955 				if (IS_ERR(rq)) {
1956 					err = PTR_ERR(rq);
1957 					goto end;
1958 				}
1959 
1960 				/* Wait until the context chain has started */
1961 				if (i == 0) {
1962 					while (READ_ONCE(result[i]) &&
1963 					       !i915_request_completed(rq))
1964 						cond_resched();
1965 				} else {
1966 					end_spin(batch, i - 1);
1967 				}
1968 
1969 				i915_request_put(rq);
1970 			}
1971 			end_spin(batch, count - 1);
1972 
1973 			vma->ops->clear_pages(vma);
1974 
1975 			err = context_sync(ce);
1976 			if (err) {
1977 				pr_err("%s: writes timed out\n",
1978 				       ce->engine->name);
1979 				goto end;
1980 			}
1981 
1982 			for (i = 0; i < count; i++) {
1983 				if (result[i] != i) {
1984 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
1985 					       ce->engine->name, pass,
1986 					       offset, i, result[i], i);
1987 					err = -EINVAL;
1988 					goto end;
1989 				}
1990 			}
1991 
1992 			vm->clear_range(vm, offset, chunk_size);
1993 			pass++;
1994 		}
1995 	}
1996 end:
1997 	if (igt_flush_test(i915))
1998 		err = -EIO;
1999 	i915_gem_context_unlock_engines(ctx);
2000 	i915_gem_object_unpin_map(out);
2001 out_put_out:
2002 	i915_gem_object_put(out);
2003 out_put_batch:
2004 	i915_gem_object_unpin_map(act);
2005 out_put_act:
2006 	i915_gem_object_put(act);
2007 out_put_bbe:
2008 	i915_gem_object_put(bbe);
2009 out_vm:
2010 	i915_vm_put(vm);
2011 out_unlock:
2012 	fput(file);
2013 	return err;
2014 }
2015 
2016 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2017 {
2018 	static const struct i915_subtest tests[] = {
2019 		SUBTEST(igt_ppgtt_alloc),
2020 		SUBTEST(igt_ppgtt_lowlevel),
2021 		SUBTEST(igt_ppgtt_drunk),
2022 		SUBTEST(igt_ppgtt_walk),
2023 		SUBTEST(igt_ppgtt_pot),
2024 		SUBTEST(igt_ppgtt_fill),
2025 		SUBTEST(igt_ppgtt_shrink),
2026 		SUBTEST(igt_ppgtt_shrink_boom),
2027 		SUBTEST(igt_ggtt_lowlevel),
2028 		SUBTEST(igt_ggtt_drunk),
2029 		SUBTEST(igt_ggtt_walk),
2030 		SUBTEST(igt_ggtt_pot),
2031 		SUBTEST(igt_ggtt_fill),
2032 		SUBTEST(igt_ggtt_page),
2033 		SUBTEST(igt_cs_tlb),
2034 	};
2035 
2036 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2037 
2038 	return i915_subtests(tests, i915);
2039 }
2040