1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/i915_gem_internal.h"
30 #include "gem/selftests/mock_context.h"
31 #include "gt/intel_context.h"
32 #include "gt/intel_gpu_commands.h"
33 
34 #include "i915_random.h"
35 #include "i915_selftest.h"
36 #include "i915_vma_resource.h"
37 
38 #include "mock_drm.h"
39 #include "mock_gem_device.h"
40 #include "mock_gtt.h"
41 #include "igt_flush_test.h"
42 
43 static void cleanup_freed_objects(struct drm_i915_private *i915)
44 {
45 	i915_gem_drain_freed_objects(i915);
46 }
47 
48 static void fake_free_pages(struct drm_i915_gem_object *obj,
49 			    struct sg_table *pages)
50 {
51 	sg_free_table(pages);
52 	kfree(pages);
53 }
54 
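/*
 * Fabricate backing storage for a fake object: build a scatterlist whose
 * entries all point at the same biased pfn and reuse its physical address
 * as the dma address. No real memory is allocated, so we can "back"
 * objects far larger than RAM while exercising the GTT insertion paths.
 */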
55 static int fake_get_pages(struct drm_i915_gem_object *obj)
56 {
57 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
58 #define PFN_BIAS 0x1000
59 	struct sg_table *pages;
60 	struct scatterlist *sg;
61 	unsigned int sg_page_sizes;
62 	typeof(obj->base.size) rem;
63 
64 	pages = kmalloc(sizeof(*pages), GFP);
65 	if (!pages)
66 		return -ENOMEM;
67 
68 	rem = round_up(obj->base.size, BIT(31)) >> 31;
69 	if (sg_alloc_table(pages, rem, GFP)) {
70 		kfree(pages);
71 		return -ENOMEM;
72 	}
73 
74 	sg_page_sizes = 0;
75 	rem = obj->base.size;
76 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
77 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
78 
79 		GEM_BUG_ON(!len);
80 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
81 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
82 		sg_dma_len(sg) = len;
83 		sg_page_sizes |= len;
84 
85 		rem -= len;
86 	}
87 	GEM_BUG_ON(rem);
88 
89 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
90 
91 	return 0;
92 #undef GFP
93 }
94 
95 static void fake_put_pages(struct drm_i915_gem_object *obj,
96 			   struct sg_table *pages)
97 {
98 	fake_free_pages(obj, pages);
99 	obj->mm.dirty = false;
100 }
101 
102 static const struct drm_i915_gem_object_ops fake_ops = {
103 	.name = "fake-gem",
104 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
105 	.get_pages = fake_get_pages,
106 	.put_pages = fake_put_pages,
107 };
108 
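/* Create an object of the requested size backed by fake_ops (no real storage) */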
109 static struct drm_i915_gem_object *
110 fake_dma_object(struct drm_i915_private *i915, u64 size)
111 {
112 	static struct lock_class_key lock_class;
113 	struct drm_i915_gem_object *obj;
114 
115 	GEM_BUG_ON(!size);
116 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
117 
118 	if (overflows_type(size, obj->base.size))
119 		return ERR_PTR(-E2BIG);
120 
121 	obj = i915_gem_object_alloc();
122 	if (!obj)
123 		goto err;
124 
125 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
126 	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
127 
128 	i915_gem_object_set_volatile(obj);
129 
130 	obj->write_domain = I915_GEM_DOMAIN_CPU;
131 	obj->read_domains = I915_GEM_DOMAIN_CPU;
132 	obj->cache_level = I915_CACHE_NONE;
133 
134 	/* Preallocate the "backing storage" */
135 	if (i915_gem_object_pin_pages_unlocked(obj))
136 		goto err_obj;
137 
138 	i915_gem_object_unpin_pages(obj);
139 	return obj;
140 
141 err_obj:
142 	i915_gem_object_put(obj);
143 err:
144 	return ERR_PTR(-ENOMEM);
145 }
146 
147 static int igt_ppgtt_alloc(void *arg)
148 {
149 	struct drm_i915_private *dev_priv = arg;
150 	struct i915_ppgtt *ppgtt;
151 	struct i915_gem_ww_ctx ww;
152 	u64 size, last, limit;
153 	int err = 0;
154 
	/* Allocate a ppgtt and try to fill the entire range */
156 
157 	if (!HAS_PPGTT(dev_priv))
158 		return 0;
159 
160 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
161 	if (IS_ERR(ppgtt))
162 		return PTR_ERR(ppgtt);
163 
164 	if (!ppgtt->vm.allocate_va_range)
165 		goto err_ppgtt_cleanup;
166 
167 	/*
168 	 * While we only allocate the page tables here and so we could
169 	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the number of physical pages in the
	 * system. This should ensure that we do not run into the OOM killer
	 * during the test and wilfully take down the machine.
173 	 */
174 	limit = totalram_pages() << PAGE_SHIFT;
175 	limit = min(ppgtt->vm.total, limit);
176 
177 	i915_gem_ww_ctx_init(&ww, false);
178 retry:
179 	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
180 	if (err)
181 		goto err_ppgtt_cleanup;
182 
183 	/* Check we can allocate the entire range */
184 	for (size = 4096; size <= limit; size <<= 2) {
185 		struct i915_vm_pt_stash stash = {};
186 
187 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
188 		if (err)
189 			goto err_ppgtt_cleanup;
190 
191 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
192 		if (err) {
193 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
194 			goto err_ppgtt_cleanup;
195 		}
196 
197 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
198 		cond_resched();
199 
200 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
201 
202 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
203 	}
204 
205 	/* Check we can incrementally allocate the entire range */
206 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
207 		struct i915_vm_pt_stash stash = {};
208 
209 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
210 		if (err)
211 			goto err_ppgtt_cleanup;
212 
213 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
214 		if (err) {
215 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
216 			goto err_ppgtt_cleanup;
217 		}
218 
219 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
220 					    last, size - last);
221 		cond_resched();
222 
223 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
224 	}
225 
226 err_ppgtt_cleanup:
227 	if (err == -EDEADLK) {
228 		err = i915_gem_ww_ctx_backoff(&ww);
229 		if (!err)
230 			goto retry;
231 	}
232 	i915_gem_ww_ctx_fini(&ww);
233 
234 	i915_vm_put(&ppgtt->vm);
235 	return err;
236 }
237 
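/*
 * Exercise the low-level GTT hooks directly: for each power-of-two chunk
 * size, allocate any page tables required and call vm->insert_entries()
 * and vm->clear_range() by hand at randomised offsets within the hole,
 * using a mock vma resource instead of the full vma bind machinery.
 */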
238 static int lowlevel_hole(struct i915_address_space *vm,
239 			 u64 hole_start, u64 hole_end,
240 			 unsigned long end_time)
241 {
242 	I915_RND_STATE(seed_prng);
243 	struct i915_vma_resource *mock_vma_res;
244 	unsigned int size;
245 
246 	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
247 	if (!mock_vma_res)
248 		return -ENOMEM;
249 
250 	/* Keep creating larger objects until one cannot fit into the hole */
251 	for (size = 12; (hole_end - hole_start) >> size; size++) {
252 		I915_RND_SUBSTATE(prng, seed_prng);
253 		struct drm_i915_gem_object *obj;
254 		unsigned int *order, count, n;
255 		u64 hole_size;
256 
257 		hole_size = (hole_end - hole_start) >> size;
258 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
259 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
260 		count = hole_size >> 1;
261 		if (!count) {
262 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
263 				 __func__, hole_start, hole_end, size, hole_size);
264 			break;
265 		}
266 
267 		do {
268 			order = i915_random_order(count, &prng);
269 			if (order)
270 				break;
271 		} while (count >>= 1);
272 		if (!count) {
273 			kfree(mock_vma_res);
274 			return -ENOMEM;
275 		}
276 		GEM_BUG_ON(!order);
277 
278 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
279 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
280 
		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
286 
287 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
288 		if (IS_ERR(obj)) {
289 			kfree(order);
290 			break;
291 		}
292 
293 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
294 
295 		if (i915_gem_object_pin_pages_unlocked(obj)) {
296 			i915_gem_object_put(obj);
297 			kfree(order);
298 			break;
299 		}
300 
301 		for (n = 0; n < count; n++) {
302 			u64 addr = hole_start + order[n] * BIT_ULL(size);
303 			intel_wakeref_t wakeref;
304 
305 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
306 
307 			if (igt_timeout(end_time,
308 					"%s timed out before %d/%d\n",
309 					__func__, n, count)) {
310 				hole_end = hole_start; /* quit */
311 				break;
312 			}
313 
314 			if (vm->allocate_va_range) {
315 				struct i915_vm_pt_stash stash = {};
316 				struct i915_gem_ww_ctx ww;
317 				int err;
318 
319 				i915_gem_ww_ctx_init(&ww, false);
320 retry:
321 				err = i915_vm_lock_objects(vm, &ww);
322 				if (err)
323 					goto alloc_vm_end;
324 
325 				err = -ENOMEM;
326 				if (i915_vm_alloc_pt_stash(vm, &stash,
327 							   BIT_ULL(size)))
328 					goto alloc_vm_end;
329 
330 				err = i915_vm_map_pt_stash(vm, &stash);
331 				if (!err)
332 					vm->allocate_va_range(vm, &stash,
333 							      addr, BIT_ULL(size));
334 				i915_vm_free_pt_stash(vm, &stash);
335 alloc_vm_end:
336 				if (err == -EDEADLK) {
337 					err = i915_gem_ww_ctx_backoff(&ww);
338 					if (!err)
339 						goto retry;
340 				}
341 				i915_gem_ww_ctx_fini(&ww);
342 
343 				if (err)
344 					break;
345 			}
346 
347 			mock_vma_res->bi.pages = obj->mm.pages;
348 			mock_vma_res->node_size = BIT_ULL(size);
349 			mock_vma_res->start = addr;
350 
			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
				vm->insert_entries(vm, mock_vma_res,
						   I915_CACHE_NONE, 0);
354 		}
355 		count = n;
356 
357 		i915_random_reorder(order, count, &prng);
358 		for (n = 0; n < count; n++) {
359 			u64 addr = hole_start + order[n] * BIT_ULL(size);
360 			intel_wakeref_t wakeref;
361 
362 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
363 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
364 				vm->clear_range(vm, addr, BIT_ULL(size));
365 		}
366 
367 		i915_gem_object_unpin_pages(obj);
368 		i915_gem_object_put(obj);
369 
370 		kfree(order);
371 
372 		cleanup_freed_objects(vm->i915);
373 	}
374 
375 	kfree(mock_vma_res);
376 	return 0;
377 }
378 
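/* Unbind (ignoring errors) and release every object on the list */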
379 static void close_object_list(struct list_head *objects,
380 			      struct i915_address_space *vm)
381 {
382 	struct drm_i915_gem_object *obj, *on;
383 	int ignored;
384 
385 	list_for_each_entry_safe(obj, on, objects, st_link) {
386 		struct i915_vma *vma;
387 
388 		vma = i915_vma_instance(obj, vm, NULL);
389 		if (!IS_ERR(vma))
390 			ignored = i915_vma_unbind_unlocked(vma);
391 
392 		list_del(&obj->st_link);
393 		i915_gem_object_put(obj);
394 	}
395 }
396 
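/*
 * Bind objects of prime-numbered page counts at fixed offsets, packing them
 * bottom-up and top-down from the edges of the hole, then verify on a second
 * walk that nothing has been moved before unbinding everything again.
 */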
397 static int fill_hole(struct i915_address_space *vm,
398 		     u64 hole_start, u64 hole_end,
399 		     unsigned long end_time)
400 {
401 	const u64 hole_size = hole_end - hole_start;
402 	struct drm_i915_gem_object *obj;
403 	const unsigned long max_pages =
404 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
405 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
406 	unsigned long npages, prime, flags;
407 	struct i915_vma *vma;
408 	LIST_HEAD(objects);
409 	int err;
410 
411 	/* Try binding many VMA working inwards from either edge */
412 
413 	flags = PIN_OFFSET_FIXED | PIN_USER;
414 	if (i915_is_ggtt(vm))
415 		flags |= PIN_GLOBAL;
416 
417 	for_each_prime_number_from(prime, 2, max_step) {
418 		for (npages = 1; npages <= max_pages; npages *= prime) {
419 			const u64 full_size = npages << PAGE_SHIFT;
420 			const struct {
421 				const char *name;
422 				u64 offset;
423 				int step;
424 			} phases[] = {
425 				{ "top-down", hole_end, -1, },
426 				{ "bottom-up", hole_start, 1, },
427 				{ }
428 			}, *p;
429 
430 			obj = fake_dma_object(vm->i915, full_size);
431 			if (IS_ERR(obj))
432 				break;
433 
434 			list_add(&obj->st_link, &objects);
435 
			/*
			 * Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
440 			for (p = phases; p->name; p++) {
441 				u64 offset;
442 
443 				offset = p->offset;
444 				list_for_each_entry(obj, &objects, st_link) {
445 					vma = i915_vma_instance(obj, vm, NULL);
446 					if (IS_ERR(vma))
447 						continue;
448 
449 					if (p->step < 0) {
450 						if (offset < hole_start + obj->base.size)
451 							break;
452 						offset -= obj->base.size;
453 					}
454 
455 					err = i915_vma_pin(vma, 0, 0, offset | flags);
456 					if (err) {
457 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
458 						       __func__, p->name, err, npages, prime, offset);
459 						goto err;
460 					}
461 
462 					if (!drm_mm_node_allocated(&vma->node) ||
463 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
464 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
465 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
466 						       offset);
467 						err = -EINVAL;
468 						goto err;
469 					}
470 
471 					i915_vma_unpin(vma);
472 
473 					if (p->step > 0) {
474 						if (offset + obj->base.size > hole_end)
475 							break;
476 						offset += obj->base.size;
477 					}
478 				}
479 
480 				offset = p->offset;
481 				list_for_each_entry(obj, &objects, st_link) {
482 					vma = i915_vma_instance(obj, vm, NULL);
483 					if (IS_ERR(vma))
484 						continue;
485 
486 					if (p->step < 0) {
487 						if (offset < hole_start + obj->base.size)
488 							break;
489 						offset -= obj->base.size;
490 					}
491 
492 					if (!drm_mm_node_allocated(&vma->node) ||
493 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
494 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
495 						       __func__, p->name, vma->node.start, vma->node.size,
496 						       offset);
497 						err = -EINVAL;
498 						goto err;
499 					}
500 
501 					err = i915_vma_unbind_unlocked(vma);
502 					if (err) {
503 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
504 						       __func__, p->name, vma->node.start, vma->node.size,
505 						       err);
506 						goto err;
507 					}
508 
509 					if (p->step > 0) {
510 						if (offset + obj->base.size > hole_end)
511 							break;
512 						offset += obj->base.size;
513 					}
514 				}
515 
516 				offset = p->offset;
517 				list_for_each_entry_reverse(obj, &objects, st_link) {
518 					vma = i915_vma_instance(obj, vm, NULL);
519 					if (IS_ERR(vma))
520 						continue;
521 
522 					if (p->step < 0) {
523 						if (offset < hole_start + obj->base.size)
524 							break;
525 						offset -= obj->base.size;
526 					}
527 
528 					err = i915_vma_pin(vma, 0, 0, offset | flags);
529 					if (err) {
530 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
531 						       __func__, p->name, err, npages, prime, offset);
532 						goto err;
533 					}
534 
535 					if (!drm_mm_node_allocated(&vma->node) ||
536 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
537 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
538 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
539 						       offset);
540 						err = -EINVAL;
541 						goto err;
542 					}
543 
544 					i915_vma_unpin(vma);
545 
546 					if (p->step > 0) {
547 						if (offset + obj->base.size > hole_end)
548 							break;
549 						offset += obj->base.size;
550 					}
551 				}
552 
553 				offset = p->offset;
554 				list_for_each_entry_reverse(obj, &objects, st_link) {
555 					vma = i915_vma_instance(obj, vm, NULL);
556 					if (IS_ERR(vma))
557 						continue;
558 
559 					if (p->step < 0) {
560 						if (offset < hole_start + obj->base.size)
561 							break;
562 						offset -= obj->base.size;
563 					}
564 
565 					if (!drm_mm_node_allocated(&vma->node) ||
566 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
567 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
568 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
569 						       offset);
570 						err = -EINVAL;
571 						goto err;
572 					}
573 
574 					err = i915_vma_unbind_unlocked(vma);
575 					if (err) {
576 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
577 						       __func__, p->name, vma->node.start, vma->node.size,
578 						       err);
579 						goto err;
580 					}
581 
582 					if (p->step > 0) {
583 						if (offset + obj->base.size > hole_end)
584 							break;
585 						offset += obj->base.size;
586 					}
587 				}
588 			}
589 
590 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
591 					__func__, npages, prime)) {
592 				err = -EINTR;
593 				goto err;
594 			}
595 		}
596 
597 		close_object_list(&objects, vm);
598 		cleanup_freed_objects(vm->i915);
599 	}
600 
601 	return 0;
602 
603 err:
604 	close_object_list(&objects, vm);
605 	return err;
606 }
607 
608 static int walk_hole(struct i915_address_space *vm,
609 		     u64 hole_start, u64 hole_end,
610 		     unsigned long end_time)
611 {
612 	const u64 hole_size = hole_end - hole_start;
613 	const unsigned long max_pages =
614 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
615 	unsigned long flags;
616 	u64 size;
617 
618 	/* Try binding a single VMA in different positions within the hole */
619 
620 	flags = PIN_OFFSET_FIXED | PIN_USER;
621 	if (i915_is_ggtt(vm))
622 		flags |= PIN_GLOBAL;
623 
624 	for_each_prime_number_from(size, 1, max_pages) {
625 		struct drm_i915_gem_object *obj;
626 		struct i915_vma *vma;
627 		u64 addr;
628 		int err = 0;
629 
630 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
631 		if (IS_ERR(obj))
632 			break;
633 
634 		vma = i915_vma_instance(obj, vm, NULL);
635 		if (IS_ERR(vma)) {
636 			err = PTR_ERR(vma);
637 			goto err_put;
638 		}
639 
640 		for (addr = hole_start;
641 		     addr + obj->base.size < hole_end;
642 		     addr += obj->base.size) {
643 			err = i915_vma_pin(vma, 0, 0, addr | flags);
644 			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
646 				       __func__, addr, vma->size,
647 				       hole_start, hole_end, err);
648 				goto err_put;
649 			}
650 			i915_vma_unpin(vma);
651 
652 			if (!drm_mm_node_allocated(&vma->node) ||
653 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
654 				pr_err("%s incorrect at %llx + %llx\n",
655 				       __func__, addr, vma->size);
656 				err = -EINVAL;
657 				goto err_put;
658 			}
659 
660 			err = i915_vma_unbind_unlocked(vma);
661 			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
663 				       __func__, addr, vma->size, err);
664 				goto err_put;
665 			}
666 
667 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
668 
669 			if (igt_timeout(end_time,
670 					"%s timed out at %llx\n",
671 					__func__, addr)) {
672 				err = -EINTR;
673 				goto err_put;
674 			}
675 		}
676 
677 err_put:
678 		i915_gem_object_put(obj);
679 		if (err)
680 			return err;
681 
682 		cleanup_freed_objects(vm->i915);
683 	}
684 
685 	return 0;
686 }
687 
688 static int pot_hole(struct i915_address_space *vm,
689 		    u64 hole_start, u64 hole_end,
690 		    unsigned long end_time)
691 {
692 	struct drm_i915_gem_object *obj;
693 	struct i915_vma *vma;
694 	unsigned long flags;
695 	unsigned int pot;
696 	int err = 0;
697 
698 	flags = PIN_OFFSET_FIXED | PIN_USER;
699 	if (i915_is_ggtt(vm))
700 		flags |= PIN_GLOBAL;
701 
702 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
703 	if (IS_ERR(obj))
704 		return PTR_ERR(obj);
705 
706 	vma = i915_vma_instance(obj, vm, NULL);
707 	if (IS_ERR(vma)) {
708 		err = PTR_ERR(vma);
709 		goto err_obj;
710 	}
711 
712 	/* Insert a pair of pages across every pot boundary within the hole */
713 	for (pot = fls64(hole_end - 1) - 1;
714 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
715 	     pot--) {
716 		u64 step = BIT_ULL(pot);
717 		u64 addr;
718 
719 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
720 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
721 		     addr += step) {
722 			err = i915_vma_pin(vma, 0, 0, addr | flags);
723 			if (err) {
724 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
725 				       __func__,
726 				       addr,
727 				       hole_start, hole_end,
728 				       err);
729 				goto err_obj;
730 			}
731 
732 			if (!drm_mm_node_allocated(&vma->node) ||
733 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
734 				pr_err("%s incorrect at %llx + %llx\n",
735 				       __func__, addr, vma->size);
736 				i915_vma_unpin(vma);
737 				err = i915_vma_unbind_unlocked(vma);
738 				err = -EINVAL;
739 				goto err_obj;
740 			}
741 
742 			i915_vma_unpin(vma);
743 			err = i915_vma_unbind_unlocked(vma);
744 			GEM_BUG_ON(err);
745 		}
746 
747 		if (igt_timeout(end_time,
748 				"%s timed out after %d/%d\n",
749 				__func__, pot, fls64(hole_end - 1) - 1)) {
750 			err = -EINTR;
751 			goto err_obj;
752 		}
753 	}
754 
755 err_obj:
756 	i915_gem_object_put(obj);
757 	return err;
758 }
759 
760 static int drunk_hole(struct i915_address_space *vm,
761 		      u64 hole_start, u64 hole_end,
762 		      unsigned long end_time)
763 {
764 	I915_RND_STATE(prng);
765 	unsigned int size;
766 	unsigned long flags;
767 
768 	flags = PIN_OFFSET_FIXED | PIN_USER;
769 	if (i915_is_ggtt(vm))
770 		flags |= PIN_GLOBAL;
771 
772 	/* Keep creating larger objects until one cannot fit into the hole */
773 	for (size = 12; (hole_end - hole_start) >> size; size++) {
774 		struct drm_i915_gem_object *obj;
775 		unsigned int *order, count, n;
776 		struct i915_vma *vma;
777 		u64 hole_size;
778 		int err = -ENODEV;
779 
780 		hole_size = (hole_end - hole_start) >> size;
781 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
782 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
783 		count = hole_size >> 1;
784 		if (!count) {
785 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
786 				 __func__, hole_start, hole_end, size, hole_size);
787 			break;
788 		}
789 
790 		do {
791 			order = i915_random_order(count, &prng);
792 			if (order)
793 				break;
794 		} while (count >>= 1);
795 		if (!count)
796 			return -ENOMEM;
797 		GEM_BUG_ON(!order);
798 
		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
804 
805 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
806 		if (IS_ERR(obj)) {
807 			kfree(order);
808 			break;
809 		}
810 
811 		vma = i915_vma_instance(obj, vm, NULL);
812 		if (IS_ERR(vma)) {
813 			err = PTR_ERR(vma);
814 			goto err_obj;
815 		}
816 
817 		GEM_BUG_ON(vma->size != BIT_ULL(size));
818 
819 		for (n = 0; n < count; n++) {
820 			u64 addr = hole_start + order[n] * BIT_ULL(size);
821 
822 			err = i915_vma_pin(vma, 0, 0, addr | flags);
823 			if (err) {
824 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
825 				       __func__,
826 				       addr, BIT_ULL(size),
827 				       hole_start, hole_end,
828 				       err);
829 				goto err_obj;
830 			}
831 
832 			if (!drm_mm_node_allocated(&vma->node) ||
833 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
834 				pr_err("%s incorrect at %llx + %llx\n",
835 				       __func__, addr, BIT_ULL(size));
836 				i915_vma_unpin(vma);
837 				err = i915_vma_unbind_unlocked(vma);
838 				err = -EINVAL;
839 				goto err_obj;
840 			}
841 
842 			i915_vma_unpin(vma);
843 			err = i915_vma_unbind_unlocked(vma);
844 			GEM_BUG_ON(err);
845 
846 			if (igt_timeout(end_time,
847 					"%s timed out after %d/%d\n",
848 					__func__, n, count)) {
849 				err = -EINTR;
850 				goto err_obj;
851 			}
852 		}
853 
854 err_obj:
855 		i915_gem_object_put(obj);
856 		kfree(order);
857 		if (err)
858 			return err;
859 
860 		cleanup_freed_objects(vm->i915);
861 	}
862 
863 	return 0;
864 }
865 
866 static int __shrink_hole(struct i915_address_space *vm,
867 			 u64 hole_start, u64 hole_end,
868 			 unsigned long end_time)
869 {
870 	struct drm_i915_gem_object *obj;
871 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
872 	unsigned int order = 12;
873 	LIST_HEAD(objects);
874 	int err = 0;
875 	u64 addr;
876 
	/* Fill the hole with progressively larger objects until it is full */
878 	for (addr = hole_start; addr < hole_end; ) {
879 		struct i915_vma *vma;
880 		u64 size = BIT_ULL(order++);
881 
882 		size = min(size, hole_end - addr);
883 		obj = fake_dma_object(vm->i915, size);
884 		if (IS_ERR(obj)) {
885 			err = PTR_ERR(obj);
886 			break;
887 		}
888 
889 		list_add(&obj->st_link, &objects);
890 
891 		vma = i915_vma_instance(obj, vm, NULL);
892 		if (IS_ERR(vma)) {
893 			err = PTR_ERR(vma);
894 			break;
895 		}
896 
897 		GEM_BUG_ON(vma->size != size);
898 
899 		err = i915_vma_pin(vma, 0, 0, addr | flags);
900 		if (err) {
901 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
902 			       __func__, addr, size, hole_start, hole_end, err);
903 			break;
904 		}
905 
906 		if (!drm_mm_node_allocated(&vma->node) ||
907 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
908 			pr_err("%s incorrect at %llx + %llx\n",
909 			       __func__, addr, size);
910 			i915_vma_unpin(vma);
911 			err = i915_vma_unbind_unlocked(vma);
912 			err = -EINVAL;
913 			break;
914 		}
915 
916 		i915_vma_unpin(vma);
917 		addr += size;
918 
919 		/*
920 		 * Since we are injecting allocation faults at random intervals,
921 		 * wait for this allocation to complete before we change the
		 * fault injection.
923 		 */
924 		err = i915_vma_sync(vma);
925 		if (err)
926 			break;
927 
928 		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
930 				__func__, addr, hole_start, hole_end)) {
931 			err = -EINTR;
932 			break;
933 		}
934 	}
935 
936 	close_object_list(&objects, vm);
937 	cleanup_freed_objects(vm->i915);
938 	return err;
939 }
940 
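/*
 * Run __shrink_hole under fault injection, failing the page-table
 * allocations at every prime interval to exercise the error unwind paths.
 */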
941 static int shrink_hole(struct i915_address_space *vm,
942 		       u64 hole_start, u64 hole_end,
943 		       unsigned long end_time)
944 {
945 	unsigned long prime;
946 	int err;
947 
948 	vm->fault_attr.probability = 999;
949 	atomic_set(&vm->fault_attr.times, -1);
950 
951 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
952 		vm->fault_attr.interval = prime;
953 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
954 		if (err)
955 			break;
956 	}
957 
958 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
959 
960 	return err;
961 }
962 
963 static int shrink_boom(struct i915_address_space *vm,
964 		       u64 hole_start, u64 hole_end,
965 		       unsigned long end_time)
966 {
967 	unsigned int sizes[] = { SZ_2M, SZ_1G };
968 	struct drm_i915_gem_object *purge;
969 	struct drm_i915_gem_object *explode;
970 	int err;
971 	int i;
972 
973 	/*
974 	 * Catch the case which shrink_hole seems to miss. The setup here
975 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
977 	 * unpinned at the time.
978 	 */
979 
980 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
981 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
982 		unsigned int size = sizes[i];
983 		struct i915_vma *vma;
984 
985 		purge = fake_dma_object(vm->i915, size);
986 		if (IS_ERR(purge))
987 			return PTR_ERR(purge);
988 
989 		vma = i915_vma_instance(purge, vm, NULL);
990 		if (IS_ERR(vma)) {
991 			err = PTR_ERR(vma);
992 			goto err_purge;
993 		}
994 
995 		err = i915_vma_pin(vma, 0, 0, flags);
996 		if (err)
997 			goto err_purge;
998 
999 		/* Should now be ripe for purging */
1000 		i915_vma_unpin(vma);
1001 
1002 		explode = fake_dma_object(vm->i915, size);
1003 		if (IS_ERR(explode)) {
1004 			err = PTR_ERR(explode);
1005 			goto err_purge;
1006 		}
1007 
1008 		vm->fault_attr.probability = 100;
1009 		vm->fault_attr.interval = 1;
1010 		atomic_set(&vm->fault_attr.times, -1);
1011 
1012 		vma = i915_vma_instance(explode, vm, NULL);
1013 		if (IS_ERR(vma)) {
1014 			err = PTR_ERR(vma);
1015 			goto err_explode;
1016 		}
1017 
1018 		err = i915_vma_pin(vma, 0, 0, flags | size);
1019 		if (err)
1020 			goto err_explode;
1021 
1022 		i915_vma_unpin(vma);
1023 
1024 		i915_gem_object_put(purge);
1025 		i915_gem_object_put(explode);
1026 
1027 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1028 		cleanup_freed_objects(vm->i915);
1029 	}
1030 
1031 	return 0;
1032 
1033 err_explode:
1034 	i915_gem_object_put(explode);
1035 err_purge:
1036 	i915_gem_object_put(purge);
1037 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1038 	return err;
1039 }
1040 
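/* Create a full ppGTT and run the given hole exerciser over its whole range */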
1041 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1042 			  int (*func)(struct i915_address_space *vm,
1043 				      u64 hole_start, u64 hole_end,
1044 				      unsigned long end_time))
1045 {
1046 	struct i915_ppgtt *ppgtt;
1047 	IGT_TIMEOUT(end_time);
1048 	struct file *file;
1049 	int err;
1050 
1051 	if (!HAS_FULL_PPGTT(dev_priv))
1052 		return 0;
1053 
1054 	file = mock_file(dev_priv);
1055 	if (IS_ERR(file))
1056 		return PTR_ERR(file);
1057 
1058 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1059 	if (IS_ERR(ppgtt)) {
1060 		err = PTR_ERR(ppgtt);
1061 		goto out_free;
1062 	}
1063 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1064 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1065 
1066 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1067 
1068 	i915_vm_put(&ppgtt->vm);
1069 
1070 out_free:
1071 	fput(file);
1072 	return err;
1073 }
1074 
1075 static int igt_ppgtt_fill(void *arg)
1076 {
1077 	return exercise_ppgtt(arg, fill_hole);
1078 }
1079 
1080 static int igt_ppgtt_walk(void *arg)
1081 {
1082 	return exercise_ppgtt(arg, walk_hole);
1083 }
1084 
1085 static int igt_ppgtt_pot(void *arg)
1086 {
1087 	return exercise_ppgtt(arg, pot_hole);
1088 }
1089 
1090 static int igt_ppgtt_drunk(void *arg)
1091 {
1092 	return exercise_ppgtt(arg, drunk_hole);
1093 }
1094 
1095 static int igt_ppgtt_lowlevel(void *arg)
1096 {
1097 	return exercise_ppgtt(arg, lowlevel_hole);
1098 }
1099 
1100 static int igt_ppgtt_shrink(void *arg)
1101 {
1102 	return exercise_ppgtt(arg, shrink_hole);
1103 }
1104 
1105 static int igt_ppgtt_shrink_boom(void *arg)
1106 {
1107 	return exercise_ppgtt(arg, shrink_boom);
1108 }
1109 
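/* Sort the drm_mm hole list by start address so we can walk it in order */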
1110 static int sort_holes(void *priv, const struct list_head *A,
1111 		      const struct list_head *B)
1112 {
1113 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1114 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1115 
1116 	if (a->start < b->start)
1117 		return -1;
1118 	else
1119 		return 1;
1120 }
1121 
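/*
 * Run the exerciser over every hole currently in the GGTT, restarting the
 * walk after each hole since the exerciser may have rearranged the drm_mm.
 */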
1122 static int exercise_ggtt(struct drm_i915_private *i915,
1123 			 int (*func)(struct i915_address_space *vm,
1124 				     u64 hole_start, u64 hole_end,
1125 				     unsigned long end_time))
1126 {
1127 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1128 	u64 hole_start, hole_end, last = 0;
1129 	struct drm_mm_node *node;
1130 	IGT_TIMEOUT(end_time);
1131 	int err = 0;
1132 
1133 restart:
1134 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1135 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1136 		if (hole_start < last)
1137 			continue;
1138 
1139 		if (ggtt->vm.mm.color_adjust)
1140 			ggtt->vm.mm.color_adjust(node, 0,
1141 						 &hole_start, &hole_end);
1142 		if (hole_start >= hole_end)
1143 			continue;
1144 
1145 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1146 		if (err)
1147 			break;
1148 
		/* As we have manipulated the drm_mm, the hole list may be stale, so restart the walk */
1150 		last = hole_end;
1151 		goto restart;
1152 	}
1153 
1154 	return err;
1155 }
1156 
1157 static int igt_ggtt_fill(void *arg)
1158 {
1159 	return exercise_ggtt(arg, fill_hole);
1160 }
1161 
1162 static int igt_ggtt_walk(void *arg)
1163 {
1164 	return exercise_ggtt(arg, walk_hole);
1165 }
1166 
1167 static int igt_ggtt_pot(void *arg)
1168 {
1169 	return exercise_ggtt(arg, pot_hole);
1170 }
1171 
1172 static int igt_ggtt_drunk(void *arg)
1173 {
1174 	return exercise_ggtt(arg, drunk_hole);
1175 }
1176 
1177 static int igt_ggtt_lowlevel(void *arg)
1178 {
1179 	return exercise_ggtt(arg, lowlevel_hole);
1180 }
1181 
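/*
 * Reserve a chunk of the mappable aperture, point each PTE in it at the
 * same physical page with vm->insert_page(), then write and read back
 * distinct values through the aperture in random order to check that every
 * PTE was programmed with the expected offset.
 */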
1182 static int igt_ggtt_page(void *arg)
1183 {
1184 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1185 	I915_RND_STATE(prng);
1186 	struct drm_i915_private *i915 = arg;
1187 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1188 	struct drm_i915_gem_object *obj;
1189 	intel_wakeref_t wakeref;
1190 	struct drm_mm_node tmp;
1191 	unsigned int *order, n;
1192 	int err;
1193 
1194 	if (!i915_ggtt_has_aperture(ggtt))
1195 		return 0;
1196 
1197 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1198 	if (IS_ERR(obj))
1199 		return PTR_ERR(obj);
1200 
1201 	err = i915_gem_object_pin_pages_unlocked(obj);
1202 	if (err)
1203 		goto out_free;
1204 
1205 	memset(&tmp, 0, sizeof(tmp));
1206 	mutex_lock(&ggtt->vm.mutex);
1207 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1208 					  count * PAGE_SIZE, 0,
1209 					  I915_COLOR_UNEVICTABLE,
1210 					  0, ggtt->mappable_end,
1211 					  DRM_MM_INSERT_LOW);
1212 	mutex_unlock(&ggtt->vm.mutex);
1213 	if (err)
1214 		goto out_unpin;
1215 
1216 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1217 
1218 	for (n = 0; n < count; n++) {
1219 		u64 offset = tmp.start + n * PAGE_SIZE;
1220 
1221 		ggtt->vm.insert_page(&ggtt->vm,
1222 				     i915_gem_object_get_dma_address(obj, 0),
1223 				     offset, I915_CACHE_NONE, 0);
1224 	}
1225 
1226 	order = i915_random_order(count, &prng);
1227 	if (!order) {
1228 		err = -ENOMEM;
1229 		goto out_remove;
1230 	}
1231 
1232 	for (n = 0; n < count; n++) {
1233 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1234 		u32 __iomem *vaddr;
1235 
1236 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1237 		iowrite32(n, vaddr + n);
1238 		io_mapping_unmap_atomic(vaddr);
1239 	}
1240 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1241 
1242 	i915_random_reorder(order, count, &prng);
1243 	for (n = 0; n < count; n++) {
1244 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1245 		u32 __iomem *vaddr;
1246 		u32 val;
1247 
1248 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1249 		val = ioread32(vaddr + n);
1250 		io_mapping_unmap_atomic(vaddr);
1251 
1252 		if (val != n) {
1253 			pr_err("insert page failed: found %d, expected %d\n",
1254 			       val, n);
1255 			err = -EINVAL;
1256 			break;
1257 		}
1258 	}
1259 
1260 	kfree(order);
1261 out_remove:
1262 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1263 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1264 	mutex_lock(&ggtt->vm.mutex);
1265 	drm_mm_remove_node(&tmp);
1266 	mutex_unlock(&ggtt->vm.mutex);
1267 out_unpin:
1268 	i915_gem_object_unpin_pages(obj);
1269 out_free:
1270 	i915_gem_object_put(obj);
1271 	return err;
1272 }
1273 
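/*
 * Mimic the bookkeeping of a real bind: pin the backing pages, point the
 * vma and its resource at them, and add the vma to the bound list so that
 * eviction sees a plausible vma without an actual GTT bind.
 */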
1274 static void track_vma_bind(struct i915_vma *vma)
1275 {
1276 	struct drm_i915_gem_object *obj = vma->obj;
1277 
1278 	__i915_gem_object_pin_pages(obj);
1279 
1280 	GEM_BUG_ON(atomic_read(&vma->pages_count));
1281 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1282 	__i915_gem_object_pin_pages(obj);
1283 	vma->pages = obj->mm.pages;
1284 	vma->resource->bi.pages = vma->pages;
1285 
1286 	mutex_lock(&vma->vm->mutex);
1287 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1288 	mutex_unlock(&vma->vm->mutex);
1289 }
1290 
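/* Run the hole exerciser over the default vm of a mock context */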
1291 static int exercise_mock(struct drm_i915_private *i915,
1292 			 int (*func)(struct i915_address_space *vm,
1293 				     u64 hole_start, u64 hole_end,
1294 				     unsigned long end_time))
1295 {
1296 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1297 	struct i915_address_space *vm;
1298 	struct i915_gem_context *ctx;
1299 	IGT_TIMEOUT(end_time);
1300 	int err;
1301 
1302 	ctx = mock_context(i915, "mock");
1303 	if (!ctx)
1304 		return -ENOMEM;
1305 
1306 	vm = i915_gem_context_get_eb_vm(ctx);
1307 	err = func(vm, 0, min(vm->total, limit), end_time);
1308 	i915_vm_put(vm);
1309 
1310 	mock_context_close(ctx);
1311 	return err;
1312 }
1313 
1314 static int igt_mock_fill(void *arg)
1315 {
1316 	struct i915_ggtt *ggtt = arg;
1317 
1318 	return exercise_mock(ggtt->vm.i915, fill_hole);
1319 }
1320 
1321 static int igt_mock_walk(void *arg)
1322 {
1323 	struct i915_ggtt *ggtt = arg;
1324 
1325 	return exercise_mock(ggtt->vm.i915, walk_hole);
1326 }
1327 
1328 static int igt_mock_pot(void *arg)
1329 {
1330 	struct i915_ggtt *ggtt = arg;
1331 
1332 	return exercise_mock(ggtt->vm.i915, pot_hole);
1333 }
1334 
1335 static int igt_mock_drunk(void *arg)
1336 {
1337 	struct i915_ggtt *ggtt = arg;
1338 
1339 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1340 }
1341 
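/*
 * Reserve an exact GTT range for the vma with i915_gem_gtt_reserve() and,
 * on success, attach a freshly initialised vma resource.
 */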
1342 static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1343 {
1344 	struct i915_address_space *vm = vma->vm;
1345 	struct i915_vma_resource *vma_res;
1346 	struct drm_i915_gem_object *obj = vma->obj;
1347 	int err;
1348 
1349 	vma_res = i915_vma_resource_alloc();
1350 	if (IS_ERR(vma_res))
1351 		return PTR_ERR(vma_res);
1352 
1353 	mutex_lock(&vm->mutex);
1354 	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
1355 				   offset,
1356 				   obj->cache_level,
1357 				   0);
1358 	if (!err) {
1359 		i915_vma_resource_init_from_vma(vma_res, vma);
1360 		vma->resource = vma_res;
1361 	} else {
1362 		kfree(vma_res);
1363 	}
1364 	mutex_unlock(&vm->mutex);
1365 
1366 	return err;
1367 }
1368 
1369 static int igt_gtt_reserve(void *arg)
1370 {
1371 	struct i915_ggtt *ggtt = arg;
1372 	struct drm_i915_gem_object *obj, *on;
1373 	I915_RND_STATE(prng);
1374 	LIST_HEAD(objects);
1375 	u64 total;
1376 	int err = -ENODEV;
1377 
	/*
	 * i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */
1382 
1383 	/* Start by filling the GGTT */
1384 	for (total = 0;
1385 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1386 	     total += 2 * I915_GTT_PAGE_SIZE) {
1387 		struct i915_vma *vma;
1388 
1389 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1390 						      2 * PAGE_SIZE);
1391 		if (IS_ERR(obj)) {
1392 			err = PTR_ERR(obj);
1393 			goto out;
1394 		}
1395 
1396 		err = i915_gem_object_pin_pages_unlocked(obj);
1397 		if (err) {
1398 			i915_gem_object_put(obj);
1399 			goto out;
1400 		}
1401 
1402 		list_add(&obj->st_link, &objects);
1403 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1404 		if (IS_ERR(vma)) {
1405 			err = PTR_ERR(vma);
1406 			goto out;
1407 		}
1408 
1409 		err = reserve_gtt_with_resource(vma, total);
1410 		if (err) {
1411 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1412 			       total, ggtt->vm.total, err);
1413 			goto out;
1414 		}
1415 		track_vma_bind(vma);
1416 
1417 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1418 		if (vma->node.start != total ||
1419 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1420 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1421 			       vma->node.start, vma->node.size,
1422 			       total, 2*I915_GTT_PAGE_SIZE);
1423 			err = -EINVAL;
1424 			goto out;
1425 		}
1426 	}
1427 
1428 	/* Now we start forcing evictions */
1429 	for (total = I915_GTT_PAGE_SIZE;
1430 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1431 	     total += 2 * I915_GTT_PAGE_SIZE) {
1432 		struct i915_vma *vma;
1433 
1434 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1435 						      2 * PAGE_SIZE);
1436 		if (IS_ERR(obj)) {
1437 			err = PTR_ERR(obj);
1438 			goto out;
1439 		}
1440 
1441 		err = i915_gem_object_pin_pages_unlocked(obj);
1442 		if (err) {
1443 			i915_gem_object_put(obj);
1444 			goto out;
1445 		}
1446 
1447 		list_add(&obj->st_link, &objects);
1448 
1449 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1450 		if (IS_ERR(vma)) {
1451 			err = PTR_ERR(vma);
1452 			goto out;
1453 		}
1454 
1455 		err = reserve_gtt_with_resource(vma, total);
1456 		if (err) {
1457 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1458 			       total, ggtt->vm.total, err);
1459 			goto out;
1460 		}
1461 		track_vma_bind(vma);
1462 
1463 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1464 		if (vma->node.start != total ||
1465 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1466 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1467 			       vma->node.start, vma->node.size,
1468 			       total, 2*I915_GTT_PAGE_SIZE);
1469 			err = -EINVAL;
1470 			goto out;
1471 		}
1472 	}
1473 
1474 	/* And then try at random */
1475 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1476 		struct i915_vma *vma;
1477 		u64 offset;
1478 
1479 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1480 		if (IS_ERR(vma)) {
1481 			err = PTR_ERR(vma);
1482 			goto out;
1483 		}
1484 
1485 		err = i915_vma_unbind_unlocked(vma);
1486 		if (err) {
1487 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1488 			goto out;
1489 		}
1490 
1491 		offset = igt_random_offset(&prng,
1492 					   0, ggtt->vm.total,
1493 					   2 * I915_GTT_PAGE_SIZE,
1494 					   I915_GTT_MIN_ALIGNMENT);
1495 
1496 		err = reserve_gtt_with_resource(vma, offset);
1497 		if (err) {
1498 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1499 			       total, ggtt->vm.total, err);
1500 			goto out;
1501 		}
1502 		track_vma_bind(vma);
1503 
1504 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1505 		if (vma->node.start != offset ||
1506 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1507 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1508 			       vma->node.start, vma->node.size,
1509 			       offset, 2*I915_GTT_PAGE_SIZE);
1510 			err = -EINVAL;
1511 			goto out;
1512 		}
1513 	}
1514 
1515 out:
1516 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1517 		i915_gem_object_unpin_pages(obj);
1518 		i915_gem_object_put(obj);
1519 	}
1520 	return err;
1521 }
1522 
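/*
 * Let i915_gem_gtt_insert() pick a free GTT range for the vma and, on
 * success, attach a freshly initialised vma resource.
 */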
1523 static int insert_gtt_with_resource(struct i915_vma *vma)
1524 {
1525 	struct i915_address_space *vm = vma->vm;
1526 	struct i915_vma_resource *vma_res;
1527 	struct drm_i915_gem_object *obj = vma->obj;
1528 	int err;
1529 
1530 	vma_res = i915_vma_resource_alloc();
1531 	if (IS_ERR(vma_res))
1532 		return PTR_ERR(vma_res);
1533 
1534 	mutex_lock(&vm->mutex);
1535 	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
1536 				  obj->cache_level, 0, vm->total, 0);
1537 	if (!err) {
1538 		i915_vma_resource_init_from_vma(vma_res, vma);
1539 		vma->resource = vma_res;
1540 	} else {
1541 		kfree(vma_res);
1542 	}
1543 	mutex_unlock(&vm->mutex);
1544 
1545 	return err;
1546 }
1547 
1548 static int igt_gtt_insert(void *arg)
1549 {
1550 	struct i915_ggtt *ggtt = arg;
1551 	struct drm_i915_gem_object *obj, *on;
1552 	struct drm_mm_node tmp = {};
1553 	const struct invalid_insert {
1554 		u64 size;
1555 		u64 alignment;
1556 		u64 start, end;
1557 	} invalid_insert[] = {
1558 		{
1559 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1560 			0, ggtt->vm.total,
1561 		},
1562 		{
1563 			2*I915_GTT_PAGE_SIZE, 0,
1564 			0, I915_GTT_PAGE_SIZE,
1565 		},
1566 		{
1567 			-(u64)I915_GTT_PAGE_SIZE, 0,
1568 			0, 4*I915_GTT_PAGE_SIZE,
1569 		},
1570 		{
1571 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1572 			0, 4*I915_GTT_PAGE_SIZE,
1573 		},
1574 		{
1575 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1576 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1577 		},
1578 		{}
1579 	}, *ii;
1580 	LIST_HEAD(objects);
1581 	u64 total;
1582 	int err = -ENODEV;
1583 
	/*
	 * i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */
1587 
1588 	/* Check a couple of obviously invalid requests */
1589 	for (ii = invalid_insert; ii->size; ii++) {
1590 		mutex_lock(&ggtt->vm.mutex);
1591 		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
1592 					  ii->size, ii->alignment,
1593 					  I915_COLOR_UNEVICTABLE,
1594 					  ii->start, ii->end,
1595 					  0);
1596 		mutex_unlock(&ggtt->vm.mutex);
1597 		if (err != -ENOSPC) {
1598 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1599 			       ii->size, ii->alignment, ii->start, ii->end,
1600 			       err);
1601 			return -EINVAL;
1602 		}
1603 	}
1604 
1605 	/* Start by filling the GGTT */
1606 	for (total = 0;
1607 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1608 	     total += I915_GTT_PAGE_SIZE) {
1609 		struct i915_vma *vma;
1610 
1611 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1612 						      I915_GTT_PAGE_SIZE);
1613 		if (IS_ERR(obj)) {
1614 			err = PTR_ERR(obj);
1615 			goto out;
1616 		}
1617 
1618 		err = i915_gem_object_pin_pages_unlocked(obj);
1619 		if (err) {
1620 			i915_gem_object_put(obj);
1621 			goto out;
1622 		}
1623 
1624 		list_add(&obj->st_link, &objects);
1625 
1626 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1627 		if (IS_ERR(vma)) {
1628 			err = PTR_ERR(vma);
1629 			goto out;
1630 		}
1631 
1632 		err = insert_gtt_with_resource(vma);
1633 		if (err == -ENOSPC) {
1634 			/* maxed out the GGTT space */
1635 			i915_gem_object_put(obj);
1636 			break;
1637 		}
1638 		if (err) {
1639 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1640 			       total, ggtt->vm.total, err);
1641 			goto out;
1642 		}
1643 		track_vma_bind(vma);
1644 		__i915_vma_pin(vma);
1645 
1646 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1647 	}
1648 
1649 	list_for_each_entry(obj, &objects, st_link) {
1650 		struct i915_vma *vma;
1651 
1652 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1653 		if (IS_ERR(vma)) {
1654 			err = PTR_ERR(vma);
1655 			goto out;
1656 		}
1657 
1658 		if (!drm_mm_node_allocated(&vma->node)) {
1659 			pr_err("VMA was unexpectedly evicted!\n");
1660 			err = -EINVAL;
1661 			goto out;
1662 		}
1663 
1664 		__i915_vma_unpin(vma);
1665 	}
1666 
1667 	/* If we then reinsert, we should find the same hole */
1668 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1669 		struct i915_vma *vma;
1670 		u64 offset;
1671 
1672 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1673 		if (IS_ERR(vma)) {
1674 			err = PTR_ERR(vma);
1675 			goto out;
1676 		}
1677 
1678 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1679 		offset = vma->node.start;
1680 
1681 		err = i915_vma_unbind_unlocked(vma);
1682 		if (err) {
1683 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1684 			goto out;
1685 		}
1686 
1687 		err = insert_gtt_with_resource(vma);
1688 		if (err) {
1689 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1690 			       total, ggtt->vm.total, err);
1691 			goto out;
1692 		}
1693 		track_vma_bind(vma);
1694 
1695 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1696 		if (vma->node.start != offset) {
1697 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1698 			       offset, vma->node.start);
1699 			err = -EINVAL;
1700 			goto out;
1701 		}
1702 	}
1703 
1704 	/* And then force evictions */
1705 	for (total = 0;
1706 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1707 	     total += 2 * I915_GTT_PAGE_SIZE) {
1708 		struct i915_vma *vma;
1709 
1710 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1711 						      2 * I915_GTT_PAGE_SIZE);
1712 		if (IS_ERR(obj)) {
1713 			err = PTR_ERR(obj);
1714 			goto out;
1715 		}
1716 
1717 		err = i915_gem_object_pin_pages_unlocked(obj);
1718 		if (err) {
1719 			i915_gem_object_put(obj);
1720 			goto out;
1721 		}
1722 
1723 		list_add(&obj->st_link, &objects);
1724 
1725 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1726 		if (IS_ERR(vma)) {
1727 			err = PTR_ERR(vma);
1728 			goto out;
1729 		}
1730 
1731 		err = insert_gtt_with_resource(vma);
1732 		if (err) {
1733 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1734 			       total, ggtt->vm.total, err);
1735 			goto out;
1736 		}
1737 		track_vma_bind(vma);
1738 
1739 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1740 	}
1741 
1742 out:
1743 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1744 		i915_gem_object_unpin_pages(obj);
1745 		i915_gem_object_put(obj);
1746 	}
1747 	return err;
1748 }
1749 
1750 int i915_gem_gtt_mock_selftests(void)
1751 {
1752 	static const struct i915_subtest tests[] = {
1753 		SUBTEST(igt_mock_drunk),
1754 		SUBTEST(igt_mock_walk),
1755 		SUBTEST(igt_mock_pot),
1756 		SUBTEST(igt_mock_fill),
1757 		SUBTEST(igt_gtt_reserve),
1758 		SUBTEST(igt_gtt_insert),
1759 	};
1760 	struct drm_i915_private *i915;
1761 	struct intel_gt *gt;
1762 	int err;
1763 
1764 	i915 = mock_gem_device();
1765 	if (!i915)
1766 		return -ENOMEM;
1767 
1768 	/* allocate the ggtt */
1769 	err = intel_gt_assign_ggtt(to_gt(i915));
1770 	if (err)
1771 		goto out_put;
1772 
1773 	gt = to_gt(i915);
1774 
1775 	mock_init_ggtt(gt);
1776 
1777 	err = i915_subtests(tests, gt->ggtt);
1778 
1779 	mock_device_flush(i915);
1780 	i915_gem_drain_freed_objects(i915);
1781 	mock_fini_ggtt(gt->ggtt);
1782 
1783 out_put:
1784 	mock_destroy_device(i915);
1785 	return err;
1786 }
1787 
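/* Submit an empty request on the context and wait up to 200ms for it to complete */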
1788 static int context_sync(struct intel_context *ce)
1789 {
1790 	struct i915_request *rq;
1791 	long timeout;
1792 
1793 	rq = intel_context_create_request(ce);
1794 	if (IS_ERR(rq))
1795 		return PTR_ERR(rq);
1796 
1797 	i915_request_get(rq);
1798 	i915_request_add(rq);
1799 
1800 	timeout = i915_request_wait(rq, 0, HZ / 5);
1801 	i915_request_put(rq);
1802 
1803 	return timeout < 0 ? -EIO : 0;
1804 }
1805 
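/*
 * Submit a request that jumps to the batch at the given address, emitting
 * an initial breadcrumb first (where supported) so that a hang inside the
 * batch can be distinguished from a hang during request setup.
 */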
1806 static struct i915_request *
1807 submit_batch(struct intel_context *ce, u64 addr)
1808 {
1809 	struct i915_request *rq;
1810 	int err;
1811 
1812 	rq = intel_context_create_request(ce);
1813 	if (IS_ERR(rq))
1814 		return rq;
1815 
1816 	err = 0;
1817 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1818 		err = rq->engine->emit_init_breadcrumb(rq);
1819 	if (err == 0)
1820 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1821 
1822 	if (err == 0)
1823 		i915_request_get(rq);
1824 	i915_request_add(rq);
1825 
1826 	return err ? ERR_PTR(err) : rq;
1827 }
1828 
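/*
 * Each batch occupies a 64-byte slot ending in a MI_BATCH_BUFFER_START that
 * jumps back to itself; spinner() returns the dword just before that jump,
 * which end_spin() overwrites with MI_BATCH_BUFFER_END to break the loop.
 */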
1829 static u32 *spinner(u32 *batch, int i)
1830 {
1831 	return batch + i * 64 / sizeof(*batch) + 4;
1832 }
1833 
1834 static void end_spin(u32 *batch, int i)
1835 {
1836 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1837 	wmb();
1838 }
1839 
1840 static int igt_cs_tlb(void *arg)
1841 {
1842 	const unsigned int count = PAGE_SIZE / 64;
1843 	const unsigned int chunk_size = count * PAGE_SIZE;
1844 	struct drm_i915_private *i915 = arg;
1845 	struct drm_i915_gem_object *bbe, *act, *out;
1846 	struct i915_gem_engines_iter it;
1847 	struct i915_address_space *vm;
1848 	struct i915_gem_context *ctx;
1849 	struct intel_context *ce;
1850 	struct i915_vma *vma;
1851 	I915_RND_STATE(prng);
1852 	struct file *file;
1853 	unsigned int i;
1854 	u32 *result;
1855 	u32 *batch;
1856 	int err = 0;
1857 
1858 	/*
	 * Our mission here is to fool the hardware into executing something
	 * from the stale scratch mapping, as it has not seen the batch move
	 * (due to the missing TLB invalidate).
1862 	 */
1863 
1864 	file = mock_file(i915);
1865 	if (IS_ERR(file))
1866 		return PTR_ERR(file);
1867 
1868 	ctx = live_context(i915, file);
1869 	if (IS_ERR(ctx)) {
1870 		err = PTR_ERR(ctx);
1871 		goto out_unlock;
1872 	}
1873 
1874 	vm = i915_gem_context_get_eb_vm(ctx);
1875 	if (i915_is_ggtt(vm))
1876 		goto out_vm;
1877 
	/* Create two pages: a dummy with which we prefill the TLB, and the intended target */
1879 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1880 	if (IS_ERR(bbe)) {
1881 		err = PTR_ERR(bbe);
1882 		goto out_vm;
1883 	}
1884 
1885 	batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
1886 	if (IS_ERR(batch)) {
1887 		err = PTR_ERR(batch);
1888 		goto out_put_bbe;
1889 	}
1890 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1891 	i915_gem_object_flush_map(bbe);
1892 	i915_gem_object_unpin_map(bbe);
1893 
1894 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1895 	if (IS_ERR(act)) {
1896 		err = PTR_ERR(act);
1897 		goto out_put_bbe;
1898 	}
1899 
	/* Track the execution of each request by writing into a different slot */
1901 	batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
1902 	if (IS_ERR(batch)) {
1903 		err = PTR_ERR(batch);
1904 		goto out_put_act;
1905 	}
1906 	for (i = 0; i < count; i++) {
1907 		u32 *cs = batch + i * 64 / sizeof(*cs);
1908 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1909 
1910 		GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
1911 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
1912 		if (GRAPHICS_VER(i915) >= 8) {
1913 			cs[1] = lower_32_bits(addr);
1914 			cs[2] = upper_32_bits(addr);
1915 			cs[3] = i;
1916 			cs[4] = MI_NOOP;
1917 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
1918 		} else {
1919 			cs[1] = 0;
1920 			cs[2] = lower_32_bits(addr);
1921 			cs[3] = i;
1922 			cs[4] = MI_NOOP;
1923 			cs[5] = MI_BATCH_BUFFER_START;
1924 		}
1925 	}
1926 
1927 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1928 	if (IS_ERR(out)) {
1929 		err = PTR_ERR(out);
1930 		goto out_put_batch;
1931 	}
1932 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1933 
1934 	vma = i915_vma_instance(out, vm, NULL);
1935 	if (IS_ERR(vma)) {
1936 		err = PTR_ERR(vma);
1937 		goto out_put_out;
1938 	}
1939 
1940 	err = i915_vma_pin(vma, 0, 0,
1941 			   PIN_USER |
1942 			   PIN_OFFSET_FIXED |
1943 			   (vm->total - PAGE_SIZE));
1944 	if (err)
1945 		goto out_put_out;
1946 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1947 
1948 	result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
1949 	if (IS_ERR(result)) {
1950 		err = PTR_ERR(result);
1951 		goto out_put_out;
1952 	}
1953 
1954 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1955 		IGT_TIMEOUT(end_time);
1956 		unsigned long pass = 0;
1957 
1958 		if (!intel_engine_can_store_dword(ce->engine))
1959 			continue;
1960 
1961 		while (!__igt_timeout(end_time, NULL)) {
1962 			struct i915_vm_pt_stash stash = {};
1963 			struct i915_request *rq;
1964 			struct i915_gem_ww_ctx ww;
1965 			struct i915_vma_resource *vma_res;
1966 			u64 offset;
1967 
1968 			offset = igt_random_offset(&prng,
1969 						   0, vm->total - PAGE_SIZE,
1970 						   chunk_size, PAGE_SIZE);
1971 
1972 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1973 
1974 			vma = i915_vma_instance(bbe, vm, NULL);
1975 			if (IS_ERR(vma)) {
1976 				err = PTR_ERR(vma);
1977 				goto end;
1978 			}
1979 
1980 			i915_gem_object_lock(bbe, NULL);
1981 			err = i915_vma_get_pages(vma);
1982 			i915_gem_object_unlock(bbe);
1983 			if (err)
1984 				goto end;
1985 
1986 			vma_res = i915_vma_resource_alloc();
1987 			if (IS_ERR(vma_res)) {
1988 				i915_vma_put_pages(vma);
1989 				err = PTR_ERR(vma_res);
1990 				goto end;
1991 			}
1992 
1993 			i915_gem_ww_ctx_init(&ww, false);
1994 retry:
1995 			err = i915_vm_lock_objects(vm, &ww);
1996 			if (err)
1997 				goto end_ww;
1998 
1999 			err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
2000 			if (err)
2001 				goto end_ww;
2002 
2003 			err = i915_vm_map_pt_stash(vm, &stash);
2004 			if (!err)
2005 				vm->allocate_va_range(vm, &stash, offset, chunk_size);
2006 			i915_vm_free_pt_stash(vm, &stash);
2007 end_ww:
2008 			if (err == -EDEADLK) {
2009 				err = i915_gem_ww_ctx_backoff(&ww);
2010 				if (!err)
2011 					goto retry;
2012 			}
2013 			i915_gem_ww_ctx_fini(&ww);
2014 			if (err) {
2015 				kfree(vma_res);
2016 				goto end;
2017 			}
2018 
2019 			i915_vma_resource_init_from_vma(vma_res, vma);
2020 			/* Prime the TLB with the dummy pages */
2021 			for (i = 0; i < count; i++) {
2022 				vma_res->start = offset + i * PAGE_SIZE;
2023 				vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
2024 						   0);
2025 
2026 				rq = submit_batch(ce, vma_res->start);
2027 				if (IS_ERR(rq)) {
2028 					err = PTR_ERR(rq);
2029 					i915_vma_resource_fini(vma_res);
2030 					kfree(vma_res);
2031 					goto end;
2032 				}
2033 				i915_request_put(rq);
2034 			}
2035 			i915_vma_resource_fini(vma_res);
2036 			i915_vma_put_pages(vma);
2037 
2038 			err = context_sync(ce);
2039 			if (err) {
2040 				pr_err("%s: dummy setup timed out\n",
2041 				       ce->engine->name);
2042 				kfree(vma_res);
2043 				goto end;
2044 			}
2045 
2046 			vma = i915_vma_instance(act, vm, NULL);
2047 			if (IS_ERR(vma)) {
2048 				kfree(vma_res);
2049 				err = PTR_ERR(vma);
2050 				goto end;
2051 			}
2052 
2053 			i915_gem_object_lock(act, NULL);
2054 			err = i915_vma_get_pages(vma);
2055 			i915_gem_object_unlock(act);
2056 			if (err) {
2057 				kfree(vma_res);
2058 				goto end;
2059 			}
2060 
2061 			i915_vma_resource_init_from_vma(vma_res, vma);
2062 			/* Replace the TLB with target batches */
2063 			for (i = 0; i < count; i++) {
2064 				struct i915_request *rq;
2065 				u32 *cs = batch + i * 64 / sizeof(*cs);
2066 				u64 addr;
2067 
2068 				vma_res->start = offset + i * PAGE_SIZE;
2069 				vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
2070 
2071 				addr = vma_res->start + i * 64;
2072 				cs[4] = MI_NOOP;
2073 				cs[6] = lower_32_bits(addr);
2074 				cs[7] = upper_32_bits(addr);
2075 				wmb();
2076 
2077 				rq = submit_batch(ce, addr);
2078 				if (IS_ERR(rq)) {
2079 					err = PTR_ERR(rq);
2080 					i915_vma_resource_fini(vma_res);
2081 					kfree(vma_res);
2082 					goto end;
2083 				}
2084 
2085 				/* Wait until the context chain has started */
2086 				if (i == 0) {
2087 					while (READ_ONCE(result[i]) &&
2088 					       !i915_request_completed(rq))
2089 						cond_resched();
2090 				} else {
2091 					end_spin(batch, i - 1);
2092 				}
2093 
2094 				i915_request_put(rq);
2095 			}
2096 			end_spin(batch, count - 1);
2097 
2098 			i915_vma_resource_fini(vma_res);
2099 			kfree(vma_res);
2100 			i915_vma_put_pages(vma);
2101 
2102 			err = context_sync(ce);
2103 			if (err) {
2104 				pr_err("%s: writes timed out\n",
2105 				       ce->engine->name);
2106 				goto end;
2107 			}
2108 
2109 			for (i = 0; i < count; i++) {
2110 				if (result[i] != i) {
2111 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2112 					       ce->engine->name, pass,
2113 					       offset, i, result[i], i);
2114 					err = -EINVAL;
2115 					goto end;
2116 				}
2117 			}
2118 
2119 			vm->clear_range(vm, offset, chunk_size);
2120 			pass++;
2121 		}
2122 	}
2123 end:
2124 	if (igt_flush_test(i915))
2125 		err = -EIO;
2126 	i915_gem_context_unlock_engines(ctx);
2127 	i915_gem_object_unpin_map(out);
2128 out_put_out:
2129 	i915_gem_object_put(out);
2130 out_put_batch:
2131 	i915_gem_object_unpin_map(act);
2132 out_put_act:
2133 	i915_gem_object_put(act);
2134 out_put_bbe:
2135 	i915_gem_object_put(bbe);
2136 out_vm:
2137 	i915_vm_put(vm);
2138 out_unlock:
2139 	fput(file);
2140 	return err;
2141 }
2142 
2143 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2144 {
2145 	static const struct i915_subtest tests[] = {
2146 		SUBTEST(igt_ppgtt_alloc),
2147 		SUBTEST(igt_ppgtt_lowlevel),
2148 		SUBTEST(igt_ppgtt_drunk),
2149 		SUBTEST(igt_ppgtt_walk),
2150 		SUBTEST(igt_ppgtt_pot),
2151 		SUBTEST(igt_ppgtt_fill),
2152 		SUBTEST(igt_ppgtt_shrink),
2153 		SUBTEST(igt_ppgtt_shrink_boom),
2154 		SUBTEST(igt_ggtt_lowlevel),
2155 		SUBTEST(igt_ggtt_drunk),
2156 		SUBTEST(igt_ggtt_walk),
2157 		SUBTEST(igt_ggtt_pot),
2158 		SUBTEST(igt_ggtt_fill),
2159 		SUBTEST(igt_ggtt_page),
2160 		SUBTEST(igt_cs_tlb),
2161 	};
2162 
2163 	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
2164 
2165 	return i915_subtests(tests, i915);
2166 }
2167