1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 #include "gt/intel_gpu_commands.h"
32 
33 #include "i915_random.h"
34 #include "i915_selftest.h"
35 
36 #include "mock_drm.h"
37 #include "mock_gem_device.h"
38 #include "mock_gtt.h"
39 #include "igt_flush_test.h"
40 
/* Flush the deferred-free worker so object memory is reclaimed between passes */
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	i915_gem_drain_freed_objects(i915);
}
45 
/* Release the sg_table built by fake_get_pages(); no real pages to free */
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
52 
/*
 * Build fake backing store for an object: every scatterlist entry points
 * at the same bogus pfn (PFN_BIAS), so the "pages" must never be touched
 * by the CPU. Only the DMA-address plumbing is populated, which is enough
 * to exercise the GTT page-table code without allocating real memory.
 */
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	/* One sg entry per 2GiB (BIT(31)) chunk of the object */
	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len; /* accumulate segment-length bits for set_pages */

		rem -= len;
	}
	GEM_BUG_ON(rem);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
	/* NOTE(review): PFN_BIAS is never #undef'd and leaks into the rest of the file */
}
92 
/* Drop the fake backing store; nothing was ever written, so clear dirty */
static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
}
99 
/* Object ops wiring the fake backing-store helpers above into the GEM core */
static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};
106 
107 static struct drm_i915_gem_object *
108 fake_dma_object(struct drm_i915_private *i915, u64 size)
109 {
110 	static struct lock_class_key lock_class;
111 	struct drm_i915_gem_object *obj;
112 
113 	GEM_BUG_ON(!size);
114 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
115 
116 	if (overflows_type(size, obj->base.size))
117 		return ERR_PTR(-E2BIG);
118 
119 	obj = i915_gem_object_alloc();
120 	if (!obj)
121 		goto err;
122 
123 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
124 	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
125 
126 	i915_gem_object_set_volatile(obj);
127 
128 	obj->write_domain = I915_GEM_DOMAIN_CPU;
129 	obj->read_domains = I915_GEM_DOMAIN_CPU;
130 	obj->cache_level = I915_CACHE_NONE;
131 
132 	/* Preallocate the "backing storage" */
133 	if (i915_gem_object_pin_pages_unlocked(obj))
134 		goto err_obj;
135 
136 	i915_gem_object_unpin_pages(obj);
137 	return obj;
138 
139 err_obj:
140 	i915_gem_object_put(obj);
141 err:
142 	return ERR_PTR(-ENOMEM);
143 }
144 
145 static int igt_ppgtt_alloc(void *arg)
146 {
147 	struct drm_i915_private *dev_priv = arg;
148 	struct i915_ppgtt *ppgtt;
149 	struct i915_gem_ww_ctx ww;
150 	u64 size, last, limit;
151 	int err = 0;
152 
153 	/* Allocate a ppggt and try to fill the entire range */
154 
155 	if (!HAS_PPGTT(dev_priv))
156 		return 0;
157 
158 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
159 	if (IS_ERR(ppgtt))
160 		return PTR_ERR(ppgtt);
161 
162 	if (!ppgtt->vm.allocate_va_range)
163 		goto err_ppgtt_cleanup;
164 
165 	/*
166 	 * While we only allocate the page tables here and so we could
167 	 * address a much larger GTT than we could actually fit into
168 	 * RAM, a practical limit is the amount of physical pages in the system.
169 	 * This should ensure that we do not run into the oomkiller during
170 	 * the test and take down the machine wilfully.
171 	 */
172 	limit = totalram_pages() << PAGE_SHIFT;
173 	limit = min(ppgtt->vm.total, limit);
174 
175 	i915_gem_ww_ctx_init(&ww, false);
176 retry:
177 	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
178 	if (err)
179 		goto err_ppgtt_cleanup;
180 
181 	/* Check we can allocate the entire range */
182 	for (size = 4096; size <= limit; size <<= 2) {
183 		struct i915_vm_pt_stash stash = {};
184 
185 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
186 		if (err)
187 			goto err_ppgtt_cleanup;
188 
189 		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
190 		if (err) {
191 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
192 			goto err_ppgtt_cleanup;
193 		}
194 
195 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
196 		cond_resched();
197 
198 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
199 
200 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
201 	}
202 
203 	/* Check we can incrementally allocate the entire range */
204 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
205 		struct i915_vm_pt_stash stash = {};
206 
207 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
208 		if (err)
209 			goto err_ppgtt_cleanup;
210 
211 		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
212 		if (err) {
213 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
214 			goto err_ppgtt_cleanup;
215 		}
216 
217 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
218 					    last, size - last);
219 		cond_resched();
220 
221 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
222 	}
223 
224 err_ppgtt_cleanup:
225 	if (err == -EDEADLK) {
226 		err = i915_gem_ww_ctx_backoff(&ww);
227 		if (!err)
228 			goto retry;
229 	}
230 	i915_gem_ww_ctx_fini(&ww);
231 
232 	i915_vm_put(&ppgtt->vm);
233 	return err;
234 }
235 
/*
 * Drive the vm's low-level hooks (allocate_va_range, insert_entries,
 * clear_range) directly through a zero-filled mock vma, bypassing the
 * normal vma/binding layers. Entries are inserted at randomised offsets
 * covering the hole, then cleared in a second randomised order.
 *
 * Allocation failures inside the loop deliberately end the pass without
 * failing the test (see the comment below); the function reports an error
 * only when the random-order scratch buffer cannot be allocated at all.
 */
static int lowlevel_hole(struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	struct i915_vma *mock_vma;
	unsigned int size;

	/* insert_entries() only reads node.start/size and pages from the vma */
	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
	if (!mock_vma)
		return -ENOMEM;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		/* How many objects of 1 << size fit; capped by kmalloc limits */
		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		/* Shrink the request until the permutation fits in memory */
		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count) {
			kfree(mock_vma);
			return -ENOMEM;
		}
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(vm->i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages_unlocked(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			/* Full ppGTTs need their page tables allocated first */
			if (vm->allocate_va_range) {
				struct i915_vm_pt_stash stash = {};
				struct i915_gem_ww_ctx ww;
				int err;

				i915_gem_ww_ctx_init(&ww, false);
retry:
				err = i915_vm_lock_objects(vm, &ww);
				if (err)
					goto alloc_vm_end;

				err = -ENOMEM;
				if (i915_vm_alloc_pt_stash(vm, &stash,
							   BIT_ULL(size)))
					goto alloc_vm_end;

				err = i915_vm_pin_pt_stash(vm, &stash);
				if (!err)
					vm->allocate_va_range(vm, &stash,
							      addr, BIT_ULL(size));

				i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
				if (err == -EDEADLK) {
					err = i915_gem_ww_ctx_backoff(&ww);
					if (!err)
						goto retry;
				}
				i915_gem_ww_ctx_fini(&ww);

				if (err)
					break;
			}

			/* Point the mock vma at this object/slot and insert PTEs */
			mock_vma->pages = obj->mm.pages;
			mock_vma->node.size = BIT_ULL(size);
			mock_vma->node.start = addr;

			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
				vm->insert_entries(vm, mock_vma,
						   I915_CACHE_NONE, 0);
		}
		count = n; /* only clear the entries we actually inserted */

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
				vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(vm->i915);
	}

	kfree(mock_vma);
	return 0;
}
377 
378 static void close_object_list(struct list_head *objects,
379 			      struct i915_address_space *vm)
380 {
381 	struct drm_i915_gem_object *obj, *on;
382 	int ignored;
383 
384 	list_for_each_entry_safe(obj, on, objects, st_link) {
385 		struct i915_vma *vma;
386 
387 		vma = i915_vma_instance(obj, vm, NULL);
388 		if (!IS_ERR(vma))
389 			ignored = i915_vma_unbind(vma);
390 
391 		list_del(&obj->st_link);
392 		i915_gem_object_put(obj);
393 	}
394 }
395 
/*
 * Pack a growing list of objects (npages *= prime each round) against
 * both edges of the hole ("top-down" walks addresses downward from
 * hole_end, "bottom-up" upward from hole_start). Each phase pins the
 * whole list at explicit offsets, re-verifies placement, then unbinds —
 * in forward list order and again in reverse — checking nothing moved
 * or fell off the ends of the hole.
 */
static int fill_hole(struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(vm->i915, full_size);
			if (IS_ERR(obj))
				break;

			/* NB: obj is reused as the list cursor in the walks below */
			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				/* Pass 1 (forward order): pin each object at its slot */
				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				/* Pass 2 (forward order): verify nothing moved, then unbind */
				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				/* Pass 3 (reverse order): pin each object again */
				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				/* Pass 4 (reverse order): verify placement, then unbind */
				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(vm->i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}
606 
/*
 * For each prime number of pages, slide a single object through every
 * non-overlapping position in the hole, verifying each bind lands at the
 * requested offset and unbinds cleanly. Object creation failure (expected
 * -ENOMEM for huge sizes) ends the sweep without failing the test.
 */
static int walk_hole(struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0; /* err_put is also the success path; must start clean */

		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_put;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_put;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx  with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_put;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_put;
			}
		}

err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(vm->i915);
	}

	return 0;
}
686 
/*
 * Pin a two-page object straddling every power-of-two boundary within the
 * hole, from the largest pot down to just above the object size, checking
 * each placement. This stresses page-table boundaries without filling the
 * whole address space.
 */
static int pot_hole(struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		/* Place the object one page below each step boundary */
		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err_obj;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				/* best-effort unbind; the misplacement is the error */
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err_obj;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err_obj;
		}
	}

err_obj:
	i915_gem_object_put(obj);
	return err;
}
758 
/*
 * Bind and unbind a single object at randomised, non-overlapping offsets
 * covering the hole, repeating with ever larger object sizes until one no
 * longer fits. Checks that random placement/teardown order does not corrupt
 * the address space. Object creation failure (expected -ENOMEM for huge
 * sizes) ends the sweep without failing the test.
 */
static int drunk_hole(struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		/* Number of slots of 1 << size; capped by kmalloc limits */
		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		/* Shrink the request until the permutation fits in memory */
		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(vm->i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err_obj;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				/* best-effort unbind; the misplacement is the error */
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err_obj;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err_obj;
			}
		}

err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(vm->i915);
	}

	return 0;
}
864 
865 static int __shrink_hole(struct i915_address_space *vm,
866 			 u64 hole_start, u64 hole_end,
867 			 unsigned long end_time)
868 {
869 	struct drm_i915_gem_object *obj;
870 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
871 	unsigned int order = 12;
872 	LIST_HEAD(objects);
873 	int err = 0;
874 	u64 addr;
875 
876 	/* Keep creating larger objects until one cannot fit into the hole */
877 	for (addr = hole_start; addr < hole_end; ) {
878 		struct i915_vma *vma;
879 		u64 size = BIT_ULL(order++);
880 
881 		size = min(size, hole_end - addr);
882 		obj = fake_dma_object(vm->i915, size);
883 		if (IS_ERR(obj)) {
884 			err = PTR_ERR(obj);
885 			break;
886 		}
887 
888 		list_add(&obj->st_link, &objects);
889 
890 		vma = i915_vma_instance(obj, vm, NULL);
891 		if (IS_ERR(vma)) {
892 			err = PTR_ERR(vma);
893 			break;
894 		}
895 
896 		GEM_BUG_ON(vma->size != size);
897 
898 		err = i915_vma_pin(vma, 0, 0, addr | flags);
899 		if (err) {
900 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
901 			       __func__, addr, size, hole_start, hole_end, err);
902 			break;
903 		}
904 
905 		if (!drm_mm_node_allocated(&vma->node) ||
906 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
907 			pr_err("%s incorrect at %llx + %llx\n",
908 			       __func__, addr, size);
909 			i915_vma_unpin(vma);
910 			err = i915_vma_unbind(vma);
911 			err = -EINVAL;
912 			break;
913 		}
914 
915 		i915_vma_unpin(vma);
916 		addr += size;
917 
918 		/*
919 		 * Since we are injecting allocation faults at random intervals,
920 		 * wait for this allocation to complete before we change the
921 		 * faultinjection.
922 		 */
923 		err = i915_vma_sync(vma);
924 		if (err)
925 			break;
926 
927 		if (igt_timeout(end_time,
928 				"%s timed out at ofset %llx [%llx - %llx]\n",
929 				__func__, addr, hole_start, hole_end)) {
930 			err = -EINTR;
931 			break;
932 		}
933 	}
934 
935 	close_object_list(&objects, vm);
936 	cleanup_freed_objects(vm->i915);
937 	return err;
938 }
939 
/*
 * Run __shrink_hole() repeatedly with near-certain fault injection at
 * every prime interval, so allocations fail at varying depths of the
 * page-table build-up. fault_attr is always reset before returning.
 */
static int shrink_hole(struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1); /* -1: inject faults forever */

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	/* Disable fault injection again */
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}
961 
/*
 * Regression-style test for shrinker recursion during page-table
 * allocation: make one object ("purge") ripe for reclaim, then force
 * every allocation to fault while binding a second object ("explode"),
 * so pinning it must invoke the shrinker mid alloc_pt/alloc_pd.
 */
static int shrink_boom(struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(vm->i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(vm->i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		/* Fail every allocation from here on, forcing reclaim */
		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		/* offset by 'size' so explode lands beside (not over) purge */
		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(vm->i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}
1039 
/*
 * Create a fresh full ppGTT and run @func over its entire address range.
 * The mock file keeps the driver's file-private machinery alive for the
 * duration. Skipped (returns 0) without full-ppGTT support.
 */
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct i915_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	struct file *file;
	int err;

	if (!HAS_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_free;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_vm_put(&ppgtt->vm);

out_free:
	fput(file);
	return err;
}
1073 
/* Selftest: pack objects against both edges of a full ppGTT */
static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}
1078 
/* Selftest: slide a single vma through every position of a full ppGTT */
static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}
1083 
/* Selftest: straddle power-of-two boundaries of a full ppGTT */
static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}
1088 
/* Selftest: bind/unbind at randomised offsets across a full ppGTT */
static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}
1093 
/* Selftest: drive the ppGTT's low-level insert/clear hooks directly */
static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}
1098 
/* Selftest: fill a ppGTT while fault injection exercises the shrinker */
static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}
1103 
/* Selftest: force shrinker recursion during ppGTT page-table allocation */
static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}
1108 
1109 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1110 {
1111 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1112 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1113 
1114 	if (a->start < b->start)
1115 		return -1;
1116 	else
1117 		return 1;
1118 }
1119 
/*
 * Run @func over every hole in the global GTT. Because @func binds and
 * unbinds nodes, the hole list is re-sorted and the walk restarted after
 * each hole, using @last to skip holes already exercised.
 */
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		/* Shrink the hole to respect any colouring constraints */
		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(&ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}

	return err;
}
1154 
/* Exercise fill_hole on the live GGTT. */
static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}
1159 
/* Exercise walk_hole on the live GGTT. */
static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}
1164 
/* Exercise pot_hole on the live GGTT. */
static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}
1169 
/* Exercise drunk_hole on the live GGTT. */
static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}
1174 
/* Exercise lowlevel_hole on the live GGTT. */
static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}
1179 
/*
 * Alias a single physical page at many GGTT offsets via insert_page(),
 * then write and read through the mappable aperture in random order to
 * verify each PTE really points at that one page.
 */
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	/* Needs CPU access through the aperture; skip if there is none. */
	if (!i915_ggtt_has_aperture(ggtt))
		return 0;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_free;

	/* Reserve a contiguous, unevictable chunk of the aperture. */
	memset(&tmp, 0, sizeof(tmp));
	mutex_lock(&ggtt->vm.mutex);
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	mutex_unlock(&ggtt->vm.mutex);
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Point every PTE of the reserved range at the same backing page. */
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	/* Write a distinct dword through each alias, in random order. */
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);

	/* Read everything back through a fresh random ordering. */
	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(&tmp);
	mutex_unlock(&ggtt->vm.mutex);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
	return err;
}
1271 
/*
 * Pretend the vma has been bound without going through the real binding
 * paths: pin the backing store, hand the object's sg_table to the vma,
 * and put the vma on the VM's bound list so later eviction sees it.
 */
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	__i915_gem_object_pin_pages(obj);

	GEM_BUG_ON(vma->pages);
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
	/*
	 * Second pin is deliberate: it backs vma->pages, mirroring (it
	 * appears) the reference the real vma get-pages path would take
	 * — NOTE(review): confirm against i915_vma pages handling.
	 */
	__i915_gem_object_pin_pages(obj);
	vma->pages = obj->mm.pages;

	mutex_lock(&vma->vm->mutex);
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
}
1287 
/*
 * Run a hole-exercise callback against the VM of a freshly mocked
 * context. The exercised range is capped at total system RAM so the
 * fake backing store cannot exhaust the host machine.
 */
static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages() << PAGE_SHIFT;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	vm = i915_gem_context_get_vm_rcu(ctx);
	err = func(vm, 0, min(vm->total, limit), end_time);
	i915_vm_put(vm);

	mock_context_close(ctx);
	return err;
}
1310 
/* Exercise fill_hole against a mock (no hardware) device. */
static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}
1317 
/* Exercise walk_hole against a mock (no hardware) device. */
static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}
1324 
/* Exercise pot_hole against a mock (no hardware) device. */
static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}
1331 
/* Exercise drunk_hole against a mock (no hardware) device. */
static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}
1338 
/*
 * Validate i915_gem_gtt_reserve() in three passes: fill the GGTT at
 * even page offsets, then force evictions by reserving the odd
 * offsets, then rebind everything at random offsets — each time
 * checking the node landed exactly where requested.
 */
static int igt_gtt_reserve(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		/* Keep for cleanup; 'out' unpins and releases everything. */
		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		/* Offset by one page: overlaps pass 1, must evict. */
		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = igt_random_offset(&prng,
					   0, ggtt->vm.total,
					   2 * I915_GTT_PAGE_SIZE,
					   I915_GTT_MIN_ALIGNMENT);

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
1511 
/*
 * Validate i915_gem_gtt_insert(): reject a table of impossible
 * requests, fill the GGTT to exhaustion, check pinned nodes survive,
 * check an unbound node is reinserted into the only free hole, and
 * finally force evictions with larger objects.
 */
static int igt_gtt_insert(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	/* Requests that must fail with -ENOSPC (terminated by size == 0). */
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
			0, ggtt->vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		/* Pin so the eviction pass below cannot steal this node. */
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	/* Everything pinned above must still be resident; drop the pins. */
	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
1728 
/*
 * Entry point for the mock (no hardware) GTT selftests: create a fake
 * device and GGTT, run the subtests against it, then tear it all down.
 */
int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	struct i915_ggtt *ggtt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
	if (!ggtt) {
		err = -ENOMEM;
		goto out_put;
	}
	mock_init_ggtt(i915, ggtt);

	err = i915_subtests(tests, ggtt);

	/* Flush outstanding work and freed objects before teardown. */
	mock_device_flush(i915);
	i915_gem_drain_freed_objects(i915);
	mock_fini_ggtt(ggtt);
	kfree(ggtt);
out_put:
	mock_destroy_device(i915);
	return err;
}
1764 
1765 static int context_sync(struct intel_context *ce)
1766 {
1767 	struct i915_request *rq;
1768 	long timeout;
1769 
1770 	rq = intel_context_create_request(ce);
1771 	if (IS_ERR(rq))
1772 		return PTR_ERR(rq);
1773 
1774 	i915_request_get(rq);
1775 	i915_request_add(rq);
1776 
1777 	timeout = i915_request_wait(rq, 0, HZ / 5);
1778 	i915_request_put(rq);
1779 
1780 	return timeout < 0 ? -EIO : 0;
1781 }
1782 
/*
 * Submit a batch-buffer start pointing at @addr on @ce. On success an
 * extra reference is taken before submission and the request returned
 * for the caller to track; on emission failure the request is still
 * added (keeping the timeline consistent) and ERR_PTR(err) returned.
 */
static struct i915_request *
submit_batch(struct intel_context *ce, u64 addr)
{
	struct i915_request *rq;
	int err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	err = 0;
	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
		err = rq->engine->emit_init_breadcrumb(rq);
	if (err == 0)
		err = rq->engine->emit_bb_start(rq, addr, 0, 0);

	if (err == 0)
		i915_request_get(rq);
	i915_request_add(rq);

	return err ? ERR_PTR(err) : rq;
}
1805 
1806 static u32 *spinner(u32 *batch, int i)
1807 {
1808 	return batch + i * 64 / sizeof(*batch) + 4;
1809 }
1810 
/*
 * Terminate the i'th spinning batch by overwriting its spin-control
 * dword with MI_BATCH_BUFFER_END; the wmb() orders the CPU store
 * before any subsequent actions that depend on the GPU seeing it.
 */
static void end_spin(u32 *batch, int i)
{
	*spinner(batch, i) = MI_BATCH_BUFFER_END;
	wmb();
}
1816 
static int igt_cs_tlb(void *arg)
{
	/* 64 bytes per batch slot, one slot per store; one page of slots. */
	const unsigned int count = PAGE_SIZE / 64;
	const unsigned int chunk_size = count * PAGE_SIZE;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *bbe, *act, *out;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_vma *vma;
	I915_RND_STATE(prng);
	struct file *file;
	unsigned int i;
	u32 *result;
	u32 *batch;
	int err = 0;

	/*
	 * Our mission here is to fool the hardware to execute something
	 * from scratch as it has not seen the batch move (due to missing
	 * the TLB invalidate).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	/* We rebind pages underneath the GPU; only safe in a full ppGTT. */
	vm = i915_gem_context_get_vm_rcu(ctx);
	if (i915_is_ggtt(vm))
		goto out_vm;

	/* Create two pages; dummy we prefill the TLB, and intended */
	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(bbe)) {
		err = PTR_ERR(bbe);
		goto out_vm;
	}

	/* Dummy batch: every dword is an immediate batch terminator. */
	batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_put_bbe;
	}
	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
	i915_gem_object_flush_map(bbe);
	i915_gem_object_unpin_map(bbe);

	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(act)) {
		err = PTR_ERR(act);
		goto out_put_bbe;
	}

	/* Track the execution of each request by writing into different slot */
	batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_put_act;
	}
	for (i = 0; i < count; i++) {
		u32 *cs = batch + i * 64 / sizeof(*cs);
		/* Each slot stores 'i' into result[i] at the top of the vm. */
		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);

		GEM_BUG_ON(INTEL_GEN(i915) < 6);
		cs[0] = MI_STORE_DWORD_IMM_GEN4;
		if (INTEL_GEN(i915) >= 8) {
			cs[1] = lower_32_bits(addr);
			cs[2] = upper_32_bits(addr);
			cs[3] = i;
			cs[4] = MI_NOOP;
			cs[5] = MI_BATCH_BUFFER_START_GEN8;
		} else {
			cs[1] = 0;
			cs[2] = lower_32_bits(addr);
			cs[3] = i;
			cs[4] = MI_NOOP;
			cs[5] = MI_BATCH_BUFFER_START;
		}
	}

	/* Result page: CPU-readable scoreboard for the stores above. */
	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(out)) {
		err = PTR_ERR(out);
		goto out_put_batch;
	}
	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);

	vma = i915_vma_instance(out, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put_out;
	}

	/* Pin the result page at the very top of the address space. */
	err = i915_vma_pin(vma, 0, 0,
			   PIN_USER |
			   PIN_OFFSET_FIXED |
			   (vm->total - PAGE_SIZE));
	if (err)
		goto out_put_out;
	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);

	result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
	if (IS_ERR(result)) {
		err = PTR_ERR(result);
		goto out_put_out;
	}

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		IGT_TIMEOUT(end_time);
		unsigned long pass = 0;

		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			struct i915_vm_pt_stash stash = {};
			struct i915_request *rq;
			struct i915_gem_ww_ctx ww;
			u64 offset;

			/* Fresh random chunk of the vm for this pass. */
			offset = igt_random_offset(&prng,
						   0, vm->total - PAGE_SIZE,
						   chunk_size, PAGE_SIZE);

			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

			vma = i915_vma_instance(bbe, vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto end;
			}

			err = vma->ops->set_pages(vma);
			if (err)
				goto end;

			/* Pre-allocate page tables for the whole chunk. */
			i915_gem_ww_ctx_init(&ww, false);
retry:
			err = i915_vm_lock_objects(vm, &ww);
			if (err)
				goto end_ww;

			err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
			if (err)
				goto end_ww;

			err = i915_vm_pin_pt_stash(vm, &stash);
			if (!err)
				vm->allocate_va_range(vm, &stash, offset, chunk_size);

			i915_vm_free_pt_stash(vm, &stash);
end_ww:
			if (err == -EDEADLK) {
				err = i915_gem_ww_ctx_backoff(&ww);
				if (!err)
					goto retry;
			}
			i915_gem_ww_ctx_fini(&ww);
			if (err)
				goto end;

			/* Prime the TLB with the dummy pages */
			for (i = 0; i < count; i++) {
				vma->node.start = offset + i * PAGE_SIZE;
				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);

				rq = submit_batch(ce, vma->node.start);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto end;
				}
				i915_request_put(rq);
			}

			vma->ops->clear_pages(vma);

			err = context_sync(ce);
			if (err) {
				pr_err("%s: dummy setup timed out\n",
				       ce->engine->name);
				goto end;
			}

			vma = i915_vma_instance(act, vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto end;
			}

			err = vma->ops->set_pages(vma);
			if (err)
				goto end;

			/* Replace the TLB with target batches */
			for (i = 0; i < count; i++) {
				struct i915_request *rq;
				u32 *cs = batch + i * 64 / sizeof(*cs);
				u64 addr;

				vma->node.start = offset + i * PAGE_SIZE;
				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);

				/* Chain each batch into the next slot. */
				addr = vma->node.start + i * 64;
				cs[4] = MI_NOOP;
				cs[6] = lower_32_bits(addr);
				cs[7] = upper_32_bits(addr);
				wmb();

				rq = submit_batch(ce, addr);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto end;
				}

				/* Wait until the context chain has started */
				if (i == 0) {
					while (READ_ONCE(result[i]) &&
					       !i915_request_completed(rq))
						cond_resched();
				} else {
					end_spin(batch, i - 1);
				}

				i915_request_put(rq);
			}
			end_spin(batch, count - 1);

			vma->ops->clear_pages(vma);

			err = context_sync(ce);
			if (err) {
				pr_err("%s: writes timed out\n",
				       ce->engine->name);
				goto end;
			}

			/* Every slot must have stored its index: no stale TLB. */
			for (i = 0; i < count; i++) {
				if (result[i] != i) {
					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
					       ce->engine->name, pass,
					       offset, i, result[i], i);
					err = -EINVAL;
					goto end;
				}
			}

			vm->clear_range(vm, offset, chunk_size);
			pass++;
		}
	}
end:
	if (igt_flush_test(i915))
		err = -EIO;
	i915_gem_context_unlock_engines(ctx);
	i915_gem_object_unpin_map(out);
out_put_out:
	i915_gem_object_put(out);
out_put_batch:
	i915_gem_object_unpin_map(act);
out_put_act:
	i915_gem_object_put(act);
out_put_bbe:
	i915_gem_object_put(bbe);
out_vm:
	i915_vm_put(vm);
out_unlock:
	fput(file);
	return err;
}
2093 
/*
 * Entry point for the live (real hardware) GTT selftests, run against
 * the given device instance.
 */
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
		SUBTEST(igt_cs_tlb),
	};

	/* The tests assume a page-aligned GGTT size. */
	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

	return i915_subtests(tests, i915);
}
2118