1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/i915_gem_internal.h"
30 #include "gem/i915_gem_lmem.h"
31 #include "gem/i915_gem_region.h"
32 #include "gem/selftests/mock_context.h"
33 #include "gt/intel_context.h"
34 #include "gt/intel_gpu_commands.h"
35 #include "gt/intel_gtt.h"
36 
37 #include "i915_random.h"
38 #include "i915_selftest.h"
39 #include "i915_vma_resource.h"
40 
41 #include "mock_drm.h"
42 #include "mock_gem_device.h"
43 #include "mock_gtt.h"
44 #include "igt_flush_test.h"
45 
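/* Flush the deferred object-free worker so each test iteration starts clean. */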
46 static void cleanup_freed_objects(struct drm_i915_private *i915)
47 {
48 	i915_gem_drain_freed_objects(i915);
49 }
50 
51 static void fake_free_pages(struct drm_i915_gem_object *obj,
52 			    struct sg_table *pages)
53 {
54 	sg_free_table(pages);
55 	kfree(pages);
56 }
57 
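/*
 * Provide "backing storage" without allocating any real memory: every
 * scatterlist entry points at the same biased pfn, so only the GTT
 * plumbing above is exercised.
 */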
58 static int fake_get_pages(struct drm_i915_gem_object *obj)
59 {
60 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
61 #define PFN_BIAS 0x1000
62 	struct sg_table *pages;
63 	struct scatterlist *sg;
64 	typeof(obj->base.size) rem;
65 
66 	pages = kmalloc(sizeof(*pages), GFP);
67 	if (!pages)
68 		return -ENOMEM;
69 
70 	rem = round_up(obj->base.size, BIT(31)) >> 31;
71 	if (sg_alloc_table(pages, rem, GFP)) {
72 		kfree(pages);
73 		return -ENOMEM;
74 	}
75 
76 	rem = obj->base.size;
77 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
78 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
79 
80 		GEM_BUG_ON(!len);
81 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
82 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
83 		sg_dma_len(sg) = len;
84 
85 		rem -= len;
86 	}
87 	GEM_BUG_ON(rem);
88 
89 	__i915_gem_object_set_pages(obj, pages);
90 
91 	return 0;
92 #undef GFP
93 }
94 
95 static void fake_put_pages(struct drm_i915_gem_object *obj,
96 			   struct sg_table *pages)
97 {
98 	fake_free_pages(obj, pages);
99 	obj->mm.dirty = false;
100 }
101 
102 static const struct drm_i915_gem_object_ops fake_ops = {
103 	.name = "fake-gem",
104 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
105 	.get_pages = fake_get_pages,
106 	.put_pages = fake_put_pages,
107 };
108 
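/*
 * Create an object backed by the fake scatterlist above, allowing
 * arbitrarily large "allocations" for GTT stress tests.
 */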
109 static struct drm_i915_gem_object *
110 fake_dma_object(struct drm_i915_private *i915, u64 size)
111 {
112 	static struct lock_class_key lock_class;
113 	struct drm_i915_gem_object *obj;
114 
115 	GEM_BUG_ON(!size);
116 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
117 
118 	if (overflows_type(size, obj->base.size))
119 		return ERR_PTR(-E2BIG);
120 
121 	obj = i915_gem_object_alloc();
122 	if (!obj)
123 		goto err;
124 
125 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
126 	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
127 
128 	i915_gem_object_set_volatile(obj);
129 
130 	obj->write_domain = I915_GEM_DOMAIN_CPU;
131 	obj->read_domains = I915_GEM_DOMAIN_CPU;
132 	obj->cache_level = I915_CACHE_NONE;
133 
134 	/* Preallocate the "backing storage" */
135 	if (i915_gem_object_pin_pages_unlocked(obj))
136 		goto err_obj;
137 
138 	i915_gem_object_unpin_pages(obj);
139 	return obj;
140 
141 err_obj:
142 	i915_gem_object_put(obj);
143 err:
144 	return ERR_PTR(-ENOMEM);
145 }
146 
147 static int igt_ppgtt_alloc(void *arg)
148 {
149 	struct drm_i915_private *dev_priv = arg;
150 	struct i915_ppgtt *ppgtt;
151 	struct i915_gem_ww_ctx ww;
152 	u64 size, last, limit;
153 	int err = 0;
154 
155 	/* Allocate a ppgtt and try to fill the entire range */
156 
157 	if (!HAS_PPGTT(dev_priv))
158 		return 0;
159 
160 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
161 	if (IS_ERR(ppgtt))
162 		return PTR_ERR(ppgtt);
163 
164 	if (!ppgtt->vm.allocate_va_range)
165 		goto err_ppgtt_cleanup;
166 
167 	/*
168 	 * While we only allocate the page tables here and so we could
169 	 * address a much larger GTT than we could actually fit into
170 	 * RAM, a practical limit is the number of physical pages in
171 	 * the system. This should ensure that we do not run into the
172 	 * OOM killer during the test and wilfully take down the machine.
173 	 */
174 	limit = totalram_pages() << PAGE_SHIFT;
175 	limit = min(ppgtt->vm.total, limit);
176 
177 	i915_gem_ww_ctx_init(&ww, false);
178 retry:
179 	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
180 	if (err)
181 		goto err_ppgtt_cleanup;
182 
183 	/* Check we can allocate the entire range */
184 	for (size = 4096; size <= limit; size <<= 2) {
185 		struct i915_vm_pt_stash stash = {};
186 
187 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
188 		if (err)
189 			goto err_ppgtt_cleanup;
190 
191 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
192 		if (err) {
193 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
194 			goto err_ppgtt_cleanup;
195 		}
196 
197 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
198 		cond_resched();
199 
200 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
201 
202 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
203 	}
204 
205 	/* Check we can incrementally allocate the entire range */
206 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
207 		struct i915_vm_pt_stash stash = {};
208 
209 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
210 		if (err)
211 			goto err_ppgtt_cleanup;
212 
213 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
214 		if (err) {
215 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
216 			goto err_ppgtt_cleanup;
217 		}
218 
219 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
220 					    last, size - last);
221 		cond_resched();
222 
223 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
224 	}
225 
226 err_ppgtt_cleanup:
227 	if (err == -EDEADLK) {
228 		err = i915_gem_ww_ctx_backoff(&ww);
229 		if (!err)
230 			goto retry;
231 	}
232 	i915_gem_ww_ctx_fini(&ww);
233 
234 	i915_vm_put(&ppgtt->vm);
235 	return err;
236 }
237 
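/*
 * Drive the low-level vm hooks (allocate_va_range, insert_entries,
 * clear_range) directly with a mock vma resource, bypassing the usual
 * VMA machinery.
 */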
238 static int lowlevel_hole(struct i915_address_space *vm,
239 			 u64 hole_start, u64 hole_end,
240 			 unsigned long end_time)
241 {
242 	const unsigned int min_alignment =
243 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
244 	I915_RND_STATE(seed_prng);
245 	struct i915_vma_resource *mock_vma_res;
246 	unsigned int size;
247 
248 	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
249 	if (!mock_vma_res)
250 		return -ENOMEM;
251 
252 	/* Keep creating larger objects until one cannot fit into the hole */
253 	for (size = 12; (hole_end - hole_start) >> size; size++) {
254 		I915_RND_SUBSTATE(prng, seed_prng);
255 		struct drm_i915_gem_object *obj;
256 		unsigned int *order, count, n;
257 		u64 hole_size, aligned_size;
258 
259 		aligned_size = max_t(u32, ilog2(min_alignment), size);
260 		hole_size = (hole_end - hole_start) >> aligned_size;
261 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
262 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
263 		count = hole_size >> 1;
264 		if (!count) {
265 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
266 				 __func__, hole_start, hole_end, size, hole_size);
267 			break;
268 		}
269 
270 		do {
271 			order = i915_random_order(count, &prng);
272 			if (order)
273 				break;
274 		} while (count >>= 1);
275 		if (!count) {
276 			kfree(mock_vma_res);
277 			return -ENOMEM;
278 		}
279 		GEM_BUG_ON(!order);
280 
281 		GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
282 		GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
283 
284 		/* Ignore allocation failures (i.e. don't report them as
285 		 * a test failure) as we are purposefully allocating very
286 		 * large objects without checking that we have sufficient
287 		 * memory. We expect to hit -ENOMEM.
288 		 */
289 
290 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
291 		if (IS_ERR(obj)) {
292 			kfree(order);
293 			break;
294 		}
295 
296 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
297 
298 		if (i915_gem_object_pin_pages_unlocked(obj)) {
299 			i915_gem_object_put(obj);
300 			kfree(order);
301 			break;
302 		}
303 
304 		for (n = 0; n < count; n++) {
305 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
306 			intel_wakeref_t wakeref;
307 
308 			GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
309 
310 			if (igt_timeout(end_time,
311 					"%s timed out before %d/%d\n",
312 					__func__, n, count)) {
313 				hole_end = hole_start; /* quit */
314 				break;
315 			}
316 
317 			if (vm->allocate_va_range) {
318 				struct i915_vm_pt_stash stash = {};
319 				struct i915_gem_ww_ctx ww;
320 				int err;
321 
322 				i915_gem_ww_ctx_init(&ww, false);
323 retry:
324 				err = i915_vm_lock_objects(vm, &ww);
325 				if (err)
326 					goto alloc_vm_end;
327 
328 				err = -ENOMEM;
329 				if (i915_vm_alloc_pt_stash(vm, &stash,
330 							   BIT_ULL(size)))
331 					goto alloc_vm_end;
332 
333 				err = i915_vm_map_pt_stash(vm, &stash);
334 				if (!err)
335 					vm->allocate_va_range(vm, &stash,
336 							      addr, BIT_ULL(size));
337 				i915_vm_free_pt_stash(vm, &stash);
338 alloc_vm_end:
339 				if (err == -EDEADLK) {
340 					err = i915_gem_ww_ctx_backoff(&ww);
341 					if (!err)
342 						goto retry;
343 				}
344 				i915_gem_ww_ctx_fini(&ww);
345 
346 				if (err)
347 					break;
348 			}
349 
350 			mock_vma_res->bi.pages = obj->mm.pages;
351 			mock_vma_res->node_size = BIT_ULL(aligned_size);
352 			mock_vma_res->start = addr;
353 
354 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
355 				vm->insert_entries(vm, mock_vma_res,
356 						   I915_CACHE_NONE, 0);
357 		}
358 		count = n;
359 
360 		i915_random_reorder(order, count, &prng);
361 		for (n = 0; n < count; n++) {
362 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
363 			intel_wakeref_t wakeref;
364 
365 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
366 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
367 				vm->clear_range(vm, addr, BIT_ULL(size));
368 		}
369 
370 		i915_gem_object_unpin_pages(obj);
371 		i915_gem_object_put(obj);
372 
373 		kfree(order);
374 
375 		cleanup_freed_objects(vm->i915);
376 	}
377 
378 	kfree(mock_vma_res);
379 	return 0;
380 }
381 
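/* Unbind (best effort) and release every object on the test's list. */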
382 static void close_object_list(struct list_head *objects,
383 			      struct i915_address_space *vm)
384 {
385 	struct drm_i915_gem_object *obj, *on;
386 	int ignored;
387 
388 	list_for_each_entry_safe(obj, on, objects, st_link) {
389 		struct i915_vma *vma;
390 
391 		vma = i915_vma_instance(obj, vm, NULL);
392 		if (!IS_ERR(vma))
393 			ignored = i915_vma_unbind_unlocked(vma);
394 
395 		list_del(&obj->st_link);
396 		i915_gem_object_put(obj);
397 	}
398 }
399 
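/*
 * Pack objects of prime-scaled sizes into the hole from both ends,
 * re-walking the list forwards and backwards to check that nothing
 * moved before unbinding it all again.
 */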
400 static int fill_hole(struct i915_address_space *vm,
401 		     u64 hole_start, u64 hole_end,
402 		     unsigned long end_time)
403 {
404 	const u64 hole_size = hole_end - hole_start;
405 	struct drm_i915_gem_object *obj;
406 	const unsigned int min_alignment =
407 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
408 	const unsigned long max_pages =
409 		min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
410 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
411 	unsigned long npages, prime, flags;
412 	struct i915_vma *vma;
413 	LIST_HEAD(objects);
414 	int err;
415 
416 	/* Try binding many VMAs, working inwards from either edge */
417 
418 	flags = PIN_OFFSET_FIXED | PIN_USER;
419 	if (i915_is_ggtt(vm))
420 		flags |= PIN_GLOBAL;
421 
422 	for_each_prime_number_from(prime, 2, max_step) {
423 		for (npages = 1; npages <= max_pages; npages *= prime) {
424 			const u64 full_size = npages << PAGE_SHIFT;
425 			const struct {
426 				const char *name;
427 				u64 offset;
428 				int step;
429 			} phases[] = {
430 				{ "top-down", hole_end, -1, },
431 				{ "bottom-up", hole_start, 1, },
432 				{ }
433 			}, *p;
434 
435 			obj = fake_dma_object(vm->i915, full_size);
436 			if (IS_ERR(obj))
437 				break;
438 
439 			list_add(&obj->st_link, &objects);
440 
441 			/* Align differently sized objects against the edges, and
442 			 * check we don't walk off into the void when binding
443 			 * them into the GTT.
444 			 */
445 			for (p = phases; p->name; p++) {
446 				u64 offset;
447 
448 				offset = p->offset;
449 				list_for_each_entry(obj, &objects, st_link) {
450 					u64 aligned_size = round_up(obj->base.size,
451 								    min_alignment);
452 
453 					vma = i915_vma_instance(obj, vm, NULL);
454 					if (IS_ERR(vma))
455 						continue;
456 
457 					if (p->step < 0) {
458 						if (offset < hole_start + aligned_size)
459 							break;
460 						offset -= aligned_size;
461 					}
462 
463 					err = i915_vma_pin(vma, 0, 0, offset | flags);
464 					if (err) {
465 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
466 						       __func__, p->name, err, npages, prime, offset);
467 						goto err;
468 					}
469 
470 					if (!drm_mm_node_allocated(&vma->node) ||
471 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
472 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
473 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
474 						       offset);
475 						err = -EINVAL;
476 						goto err;
477 					}
478 
479 					i915_vma_unpin(vma);
480 
481 					if (p->step > 0) {
482 						if (offset + aligned_size > hole_end)
483 							break;
484 						offset += aligned_size;
485 					}
486 				}
487 
488 				offset = p->offset;
489 				list_for_each_entry(obj, &objects, st_link) {
490 					u64 aligned_size = round_up(obj->base.size,
491 								    min_alignment);
492 
493 					vma = i915_vma_instance(obj, vm, NULL);
494 					if (IS_ERR(vma))
495 						continue;
496 
497 					if (p->step < 0) {
498 						if (offset < hole_start + aligned_size)
499 							break;
500 						offset -= aligned_size;
501 					}
502 
503 					if (!drm_mm_node_allocated(&vma->node) ||
504 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
505 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
506 						       __func__, p->name, vma->node.start, vma->node.size,
507 						       offset);
508 						err = -EINVAL;
509 						goto err;
510 					}
511 
512 					err = i915_vma_unbind_unlocked(vma);
513 					if (err) {
514 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
515 						       __func__, p->name, vma->node.start, vma->node.size,
516 						       err);
517 						goto err;
518 					}
519 
520 					if (p->step > 0) {
521 						if (offset + aligned_size > hole_end)
522 							break;
523 						offset += aligned_size;
524 					}
525 				}
526 
527 				offset = p->offset;
528 				list_for_each_entry_reverse(obj, &objects, st_link) {
529 					u64 aligned_size = round_up(obj->base.size,
530 								    min_alignment);
531 
532 					vma = i915_vma_instance(obj, vm, NULL);
533 					if (IS_ERR(vma))
534 						continue;
535 
536 					if (p->step < 0) {
537 						if (offset < hole_start + aligned_size)
538 							break;
539 						offset -= aligned_size;
540 					}
541 
542 					err = i915_vma_pin(vma, 0, 0, offset | flags);
543 					if (err) {
544 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
545 						       __func__, p->name, err, npages, prime, offset);
546 						goto err;
547 					}
548 
549 					if (!drm_mm_node_allocated(&vma->node) ||
550 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
551 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
552 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
553 						       offset);
554 						err = -EINVAL;
555 						goto err;
556 					}
557 
558 					i915_vma_unpin(vma);
559 
560 					if (p->step > 0) {
561 						if (offset + aligned_size > hole_end)
562 							break;
563 						offset += aligned_size;
564 					}
565 				}
566 
567 				offset = p->offset;
568 				list_for_each_entry_reverse(obj, &objects, st_link) {
569 					u64 aligned_size = round_up(obj->base.size,
570 								    min_alignment);
571 
572 					vma = i915_vma_instance(obj, vm, NULL);
573 					if (IS_ERR(vma))
574 						continue;
575 
576 					if (p->step < 0) {
577 						if (offset < hole_start + aligned_size)
578 							break;
579 						offset -= aligned_size;
580 					}
581 
582 					if (!drm_mm_node_allocated(&vma->node) ||
583 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
584 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
585 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
586 						       offset);
587 						err = -EINVAL;
588 						goto err;
589 					}
590 
591 					err = i915_vma_unbind_unlocked(vma);
592 					if (err) {
593 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
594 						       __func__, p->name, vma->node.start, vma->node.size,
595 						       err);
596 						goto err;
597 					}
598 
599 					if (p->step > 0) {
600 						if (offset + aligned_size > hole_end)
601 							break;
602 						offset += aligned_size;
603 					}
604 				}
605 			}
606 
607 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
608 					__func__, npages, prime)) {
609 				err = -EINTR;
610 				goto err;
611 			}
612 		}
613 
614 		close_object_list(&objects, vm);
615 		cleanup_freed_objects(vm->i915);
616 	}
617 
618 	return 0;
619 
620 err:
621 	close_object_list(&objects, vm);
622 	return err;
623 }
624 
625 static int walk_hole(struct i915_address_space *vm,
626 		     u64 hole_start, u64 hole_end,
627 		     unsigned long end_time)
628 {
629 	const u64 hole_size = hole_end - hole_start;
630 	const unsigned long max_pages =
631 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
632 	unsigned long min_alignment;
633 	unsigned long flags;
634 	u64 size;
635 
636 	/* Try binding a single VMA in different positions within the hole */
637 
638 	flags = PIN_OFFSET_FIXED | PIN_USER;
639 	if (i915_is_ggtt(vm))
640 		flags |= PIN_GLOBAL;
641 
642 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
643 
644 	for_each_prime_number_from(size, 1, max_pages) {
645 		struct drm_i915_gem_object *obj;
646 		struct i915_vma *vma;
647 		u64 addr;
648 		int err = 0;
649 
650 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
651 		if (IS_ERR(obj))
652 			break;
653 
654 		vma = i915_vma_instance(obj, vm, NULL);
655 		if (IS_ERR(vma)) {
656 			err = PTR_ERR(vma);
657 			goto err_put;
658 		}
659 
660 		for (addr = hole_start;
661 		     addr + obj->base.size < hole_end;
662 		     addr += round_up(obj->base.size, min_alignment)) {
663 			err = i915_vma_pin(vma, 0, 0, addr | flags);
664 			if (err) {
665 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
666 				       __func__, addr, vma->size,
667 				       hole_start, hole_end, err);
668 				goto err_put;
669 			}
670 			i915_vma_unpin(vma);
671 
672 			if (!drm_mm_node_allocated(&vma->node) ||
673 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
674 				pr_err("%s incorrect at %llx + %llx\n",
675 				       __func__, addr, vma->size);
676 				err = -EINVAL;
677 				goto err_put;
678 			}
679 
680 			err = i915_vma_unbind_unlocked(vma);
681 			if (err) {
682 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
683 				       __func__, addr, vma->size, err);
684 				goto err_put;
685 			}
686 
687 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
688 
689 			if (igt_timeout(end_time,
690 					"%s timed out at %llx\n",
691 					__func__, addr)) {
692 				err = -EINTR;
693 				goto err_put;
694 			}
695 		}
696 
697 err_put:
698 		i915_gem_object_put(obj);
699 		if (err)
700 			return err;
701 
702 		cleanup_freed_objects(vm->i915);
703 	}
704 
705 	return 0;
706 }
707 
708 static int pot_hole(struct i915_address_space *vm,
709 		    u64 hole_start, u64 hole_end,
710 		    unsigned long end_time)
711 {
712 	struct drm_i915_gem_object *obj;
713 	struct i915_vma *vma;
714 	unsigned int min_alignment;
715 	unsigned long flags;
716 	unsigned int pot;
717 	int err = 0;
718 
719 	flags = PIN_OFFSET_FIXED | PIN_USER;
720 	if (i915_is_ggtt(vm))
721 		flags |= PIN_GLOBAL;
722 
723 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
724 
725 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
726 	if (IS_ERR(obj))
727 		return PTR_ERR(obj);
728 
729 	vma = i915_vma_instance(obj, vm, NULL);
730 	if (IS_ERR(vma)) {
731 		err = PTR_ERR(vma);
732 		goto err_obj;
733 	}
734 
735 	/* Insert a pair of pages across every pot boundary within the hole */
736 	for (pot = fls64(hole_end - 1) - 1;
737 	     pot > ilog2(2 * min_alignment);
738 	     pot--) {
739 		u64 step = BIT_ULL(pot);
740 		u64 addr;
741 
742 		for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
743 		     hole_end > addr && hole_end - addr >= 2 * min_alignment;
744 		     addr += step) {
745 			err = i915_vma_pin(vma, 0, 0, addr | flags);
746 			if (err) {
747 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
748 				       __func__,
749 				       addr,
750 				       hole_start, hole_end,
751 				       err);
752 				goto err_obj;
753 			}
754 
755 			if (!drm_mm_node_allocated(&vma->node) ||
756 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
757 				pr_err("%s incorrect at %llx + %llx\n",
758 				       __func__, addr, vma->size);
759 				i915_vma_unpin(vma);
760 				err = i915_vma_unbind_unlocked(vma);
761 				err = -EINVAL;
762 				goto err_obj;
763 			}
764 
765 			i915_vma_unpin(vma);
766 			err = i915_vma_unbind_unlocked(vma);
767 			GEM_BUG_ON(err);
768 		}
769 
770 		if (igt_timeout(end_time,
771 				"%s timed out after %d/%d\n",
772 				__func__, pot, fls64(hole_end - 1) - 1)) {
773 			err = -EINTR;
774 			goto err_obj;
775 		}
776 	}
777 
778 err_obj:
779 	i915_gem_object_put(obj);
780 	return err;
781 }
782 
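/*
 * Pin an object at randomised offsets throughout the hole, growing its
 * size until it no longer fits.
 */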
783 static int drunk_hole(struct i915_address_space *vm,
784 		      u64 hole_start, u64 hole_end,
785 		      unsigned long end_time)
786 {
787 	I915_RND_STATE(prng);
788 	unsigned int min_alignment;
789 	unsigned int size;
790 	unsigned long flags;
791 
792 	flags = PIN_OFFSET_FIXED | PIN_USER;
793 	if (i915_is_ggtt(vm))
794 		flags |= PIN_GLOBAL;
795 
796 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
797 
798 	/* Keep creating larger objects until one cannot fit into the hole */
799 	for (size = 12; (hole_end - hole_start) >> size; size++) {
800 		struct drm_i915_gem_object *obj;
801 		unsigned int *order, count, n;
802 		struct i915_vma *vma;
803 		u64 hole_size, aligned_size;
804 		int err = -ENODEV;
805 
806 		aligned_size = max_t(u32, ilog2(min_alignment), size);
807 		hole_size = (hole_end - hole_start) >> aligned_size;
808 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
809 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
810 		count = hole_size >> 1;
811 		if (!count) {
812 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
813 				 __func__, hole_start, hole_end, size, hole_size);
814 			break;
815 		}
816 
817 		do {
818 			order = i915_random_order(count, &prng);
819 			if (order)
820 				break;
821 		} while (count >>= 1);
822 		if (!count)
823 			return -ENOMEM;
824 		GEM_BUG_ON(!order);
825 
826 		/* Ignore allocation failures (i.e. don't report them as
827 		 * a test failure) as we are purposefully allocating very
828 		 * large objects without checking that we have sufficient
829 		 * memory. We expect to hit -ENOMEM.
830 		 */
831 
832 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
833 		if (IS_ERR(obj)) {
834 			kfree(order);
835 			break;
836 		}
837 
838 		vma = i915_vma_instance(obj, vm, NULL);
839 		if (IS_ERR(vma)) {
840 			err = PTR_ERR(vma);
841 			goto err_obj;
842 		}
843 
844 		GEM_BUG_ON(vma->size != BIT_ULL(size));
845 
846 		for (n = 0; n < count; n++) {
847 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
848 
849 			err = i915_vma_pin(vma, 0, 0, addr | flags);
850 			if (err) {
851 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
852 				       __func__,
853 				       addr, BIT_ULL(size),
854 				       hole_start, hole_end,
855 				       err);
856 				goto err_obj;
857 			}
858 
859 			if (!drm_mm_node_allocated(&vma->node) ||
860 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
861 				pr_err("%s incorrect at %llx + %llx\n",
862 				       __func__, addr, BIT_ULL(size));
863 				i915_vma_unpin(vma);
864 				err = i915_vma_unbind_unlocked(vma);
865 				err = -EINVAL;
866 				goto err_obj;
867 			}
868 
869 			i915_vma_unpin(vma);
870 			err = i915_vma_unbind_unlocked(vma);
871 			GEM_BUG_ON(err);
872 
873 			if (igt_timeout(end_time,
874 					"%s timed out after %d/%d\n",
875 					__func__, n, count)) {
876 				err = -EINTR;
877 				goto err_obj;
878 			}
879 		}
880 
881 err_obj:
882 		i915_gem_object_put(obj);
883 		kfree(order);
884 		if (err)
885 			return err;
886 
887 		cleanup_freed_objects(vm->i915);
888 	}
889 
890 	return 0;
891 }
892 
893 static int __shrink_hole(struct i915_address_space *vm,
894 			 u64 hole_start, u64 hole_end,
895 			 unsigned long end_time)
896 {
897 	struct drm_i915_gem_object *obj;
898 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
899 	unsigned int min_alignment;
900 	unsigned int order = 12;
901 	LIST_HEAD(objects);
902 	int err = 0;
903 	u64 addr;
904 
905 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
906 
907 	/* Keep creating larger objects until one cannot fit into the hole */
908 	for (addr = hole_start; addr < hole_end; ) {
909 		struct i915_vma *vma;
910 		u64 size = BIT_ULL(order++);
911 
912 		size = min(size, hole_end - addr);
913 		obj = fake_dma_object(vm->i915, size);
914 		if (IS_ERR(obj)) {
915 			err = PTR_ERR(obj);
916 			break;
917 		}
918 
919 		list_add(&obj->st_link, &objects);
920 
921 		vma = i915_vma_instance(obj, vm, NULL);
922 		if (IS_ERR(vma)) {
923 			err = PTR_ERR(vma);
924 			break;
925 		}
926 
927 		GEM_BUG_ON(vma->size != size);
928 
929 		err = i915_vma_pin(vma, 0, 0, addr | flags);
930 		if (err) {
931 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
932 			       __func__, addr, size, hole_start, hole_end, err);
933 			break;
934 		}
935 
936 		if (!drm_mm_node_allocated(&vma->node) ||
937 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
938 			pr_err("%s incorrect at %llx + %llx\n",
939 			       __func__, addr, size);
940 			i915_vma_unpin(vma);
941 			err = i915_vma_unbind_unlocked(vma);
942 			err = -EINVAL;
943 			break;
944 		}
945 
946 		i915_vma_unpin(vma);
947 		addr += round_up(size, min_alignment);
948 
949 		/*
950 		 * Since we are injecting allocation faults at random intervals,
951 		 * wait for this allocation to complete before we change the
952 		 * fault injection.
953 		 */
954 		err = i915_vma_sync(vma);
955 		if (err)
956 			break;
957 
958 		if (igt_timeout(end_time,
959 				"%s timed out at offset %llx [%llx - %llx]\n",
960 				__func__, addr, hole_start, hole_end)) {
961 			err = -EINTR;
962 			break;
963 		}
964 	}
965 
966 	close_object_list(&objects, vm);
967 	cleanup_freed_objects(vm->i915);
968 	return err;
969 }
970 
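/*
 * Re-run __shrink_hole() with allocation faults injected at every prime
 * interval, checking that failures in the page-table allocation paths
 * unwind cleanly.
 */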
971 static int shrink_hole(struct i915_address_space *vm,
972 		       u64 hole_start, u64 hole_end,
973 		       unsigned long end_time)
974 {
975 	unsigned long prime;
976 	int err;
977 
978 	vm->fault_attr.probability = 999;
979 	atomic_set(&vm->fault_attr.times, -1);
980 
981 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
982 		vm->fault_attr.interval = prime;
983 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
984 		if (err)
985 			break;
986 	}
987 
988 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
989 
990 	return err;
991 }
992 
993 static int shrink_boom(struct i915_address_space *vm,
994 		       u64 hole_start, u64 hole_end,
995 		       unsigned long end_time)
996 {
997 	unsigned int sizes[] = { SZ_2M, SZ_1G };
998 	struct drm_i915_gem_object *purge;
999 	struct drm_i915_gem_object *explode;
1000 	int err;
1001 	int i;
1002 
1003 	/*
1004 	 * Catch the case which shrink_hole seems to miss. The setup here
1005 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
1006 	 * ensuring that all VMAs associated with the respective pd/pdp are
1007 	 * unpinned at the time.
1008 	 */
1009 
1010 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
1011 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1012 		unsigned int size = sizes[i];
1013 		struct i915_vma *vma;
1014 
1015 		purge = fake_dma_object(vm->i915, size);
1016 		if (IS_ERR(purge))
1017 			return PTR_ERR(purge);
1018 
1019 		vma = i915_vma_instance(purge, vm, NULL);
1020 		if (IS_ERR(vma)) {
1021 			err = PTR_ERR(vma);
1022 			goto err_purge;
1023 		}
1024 
1025 		err = i915_vma_pin(vma, 0, 0, flags);
1026 		if (err)
1027 			goto err_purge;
1028 
1029 		/* Should now be ripe for purging */
1030 		i915_vma_unpin(vma);
1031 
1032 		explode = fake_dma_object(vm->i915, size);
1033 		if (IS_ERR(explode)) {
1034 			err = PTR_ERR(explode);
1035 			goto err_purge;
1036 		}
1037 
1038 		vm->fault_attr.probability = 100;
1039 		vm->fault_attr.interval = 1;
1040 		atomic_set(&vm->fault_attr.times, -1);
1041 
1042 		vma = i915_vma_instance(explode, vm, NULL);
1043 		if (IS_ERR(vma)) {
1044 			err = PTR_ERR(vma);
1045 			goto err_explode;
1046 		}
1047 
1048 		err = i915_vma_pin(vma, 0, 0, flags | size);
1049 		if (err)
1050 			goto err_explode;
1051 
1052 		i915_vma_unpin(vma);
1053 
1054 		i915_gem_object_put(purge);
1055 		i915_gem_object_put(explode);
1056 
1057 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1058 		cleanup_freed_objects(vm->i915);
1059 	}
1060 
1061 	return 0;
1062 
1063 err_explode:
1064 	i915_gem_object_put(explode);
1065 err_purge:
1066 	i915_gem_object_put(purge);
1067 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1068 	return err;
1069 }
1070 
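/*
 * Pin an object from @mr at @addr and check that the resulting vma and
 * node sizes honour the region's minimum page size.
 */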
1071 static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
1072 			   u64 addr, u64 size, unsigned long flags)
1073 {
1074 	struct drm_i915_gem_object *obj;
1075 	struct i915_vma *vma;
1076 	int err = 0;
1077 	u64 expected_vma_size, expected_node_size;
1078 	bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
1079 			 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
1080 
1081 	obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
1082 	if (IS_ERR(obj)) {
1083 		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
1084 		if (PTR_ERR(obj) == -ENODEV && is_stolen)
1085 			return 0;
1086 		return PTR_ERR(obj);
1087 	}
1088 
1089 	vma = i915_vma_instance(obj, vm, NULL);
1090 	if (IS_ERR(vma)) {
1091 		err = PTR_ERR(vma);
1092 		goto err_put;
1093 	}
1094 
1095 	err = i915_vma_pin(vma, 0, 0, addr | flags);
1096 	if (err)
1097 		goto err_put;
1098 	i915_vma_unpin(vma);
1099 
1100 	if (!drm_mm_node_allocated(&vma->node)) {
1101 		err = -EINVAL;
1102 		goto err_put;
1103 	}
1104 
1105 	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1106 		err = -EINVAL;
1107 		goto err_put;
1108 	}
1109 
1110 	expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
1111 	expected_node_size = expected_vma_size;
1112 
1113 	if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
1114 		expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1115 		expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1116 	}
1117 
1118 	if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
1119 		err = i915_vma_unbind_unlocked(vma);
1120 		err = -EBADSLT;
1121 		goto err_put;
1122 	}
1123 
1124 	err = i915_vma_unbind_unlocked(vma);
1125 	if (err)
1126 		goto err_put;
1127 
1128 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1129 
1130 err_put:
1131 	i915_gem_object_put(obj);
1132 	cleanup_freed_objects(vm->i915);
1133 	return err;
1134 }
1135 
1136 static int misaligned_pin(struct i915_address_space *vm,
1137 			  u64 hole_start, u64 hole_end,
1138 			  unsigned long end_time)
1139 {
1140 	struct intel_memory_region *mr;
1141 	enum intel_region_id id;
1142 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1143 	int err = 0;
1144 	u64 hole_size = hole_end - hole_start;
1145 
1146 	if (i915_is_ggtt(vm))
1147 		flags |= PIN_GLOBAL;
1148 
1149 	for_each_memory_region(mr, vm->i915, id) {
1150 		u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
1151 		u64 size = min_alignment;
1152 		u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
1153 
1154 		/* avoid -ENOSPC on very small hole setups */
1155 		if (hole_size < 3 * min_alignment)
1156 			continue;
1157 
1158 		/* we can't test < 4k alignment due to flags being encoded in lower bits */
1159 		if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
1160 			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
1161 			/* misaligned should error with -EINVAL */
1162 			if (!err)
1163 				err = -EBADSLT;
1164 			if (err != -EINVAL)
1165 				return err;
1166 		}
1167 
1168 		/* test for vma->size expansion to min page size */
1169 		err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
1170 		if (err)
1171 			return err;
1172 
1173 		/* test for intermediate size not expanding vma->size for large alignments */
1174 		err = misaligned_case(vm, mr, addr, size / 2, flags);
1175 		if (err)
1176 			return err;
1177 	}
1178 
1179 	return 0;
1180 }
1181 
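/* Run a hole exerciser over the whole range of a freshly created ppGTT. */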
1182 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1183 			  int (*func)(struct i915_address_space *vm,
1184 				      u64 hole_start, u64 hole_end,
1185 				      unsigned long end_time))
1186 {
1187 	struct i915_ppgtt *ppgtt;
1188 	IGT_TIMEOUT(end_time);
1189 	struct file *file;
1190 	int err;
1191 
1192 	if (!HAS_FULL_PPGTT(dev_priv))
1193 		return 0;
1194 
1195 	file = mock_file(dev_priv);
1196 	if (IS_ERR(file))
1197 		return PTR_ERR(file);
1198 
1199 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1200 	if (IS_ERR(ppgtt)) {
1201 		err = PTR_ERR(ppgtt);
1202 		goto out_free;
1203 	}
1204 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1205 	assert_vm_alive(&ppgtt->vm);
1206 
1207 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1208 
1209 	i915_vm_put(&ppgtt->vm);
1210 
1211 out_free:
1212 	fput(file);
1213 	return err;
1214 }
1215 
1216 static int igt_ppgtt_fill(void *arg)
1217 {
1218 	return exercise_ppgtt(arg, fill_hole);
1219 }
1220 
1221 static int igt_ppgtt_walk(void *arg)
1222 {
1223 	return exercise_ppgtt(arg, walk_hole);
1224 }
1225 
1226 static int igt_ppgtt_pot(void *arg)
1227 {
1228 	return exercise_ppgtt(arg, pot_hole);
1229 }
1230 
1231 static int igt_ppgtt_drunk(void *arg)
1232 {
1233 	return exercise_ppgtt(arg, drunk_hole);
1234 }
1235 
1236 static int igt_ppgtt_lowlevel(void *arg)
1237 {
1238 	return exercise_ppgtt(arg, lowlevel_hole);
1239 }
1240 
1241 static int igt_ppgtt_shrink(void *arg)
1242 {
1243 	return exercise_ppgtt(arg, shrink_hole);
1244 }
1245 
1246 static int igt_ppgtt_shrink_boom(void *arg)
1247 {
1248 	return exercise_ppgtt(arg, shrink_boom);
1249 }
1250 
1251 static int igt_ppgtt_misaligned_pin(void *arg)
1252 {
1253 	return exercise_ppgtt(arg, misaligned_pin);
1254 }
1255 
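/* list_sort() comparator: order drm_mm holes by ascending start address. */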
1256 static int sort_holes(void *priv, const struct list_head *A,
1257 		      const struct list_head *B)
1258 {
1259 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1260 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1261 
1262 	if (a->start < b->start)
1263 		return -1;
1264 	else
1265 		return 1;
1266 }
1267 
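/*
 * Run a hole exerciser over every hole in the live GGTT, restarting the
 * walk after each one since the exerciser perturbs the drm_mm hole list.
 */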
1268 static int exercise_ggtt(struct drm_i915_private *i915,
1269 			 int (*func)(struct i915_address_space *vm,
1270 				     u64 hole_start, u64 hole_end,
1271 				     unsigned long end_time))
1272 {
1273 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1274 	u64 hole_start, hole_end, last = 0;
1275 	struct drm_mm_node *node;
1276 	IGT_TIMEOUT(end_time);
1277 	int err = 0;
1278 
1279 restart:
1280 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1281 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1282 		if (hole_start < last)
1283 			continue;
1284 
1285 		if (ggtt->vm.mm.color_adjust)
1286 			ggtt->vm.mm.color_adjust(node, 0,
1287 						 &hole_start, &hole_end);
1288 		if (hole_start >= hole_end)
1289 			continue;
1290 
1291 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1292 		if (err)
1293 			break;
1294 
1295 		/* As we have manipulated the drm_mm, the hole list may be stale */
1296 		last = hole_end;
1297 		goto restart;
1298 	}
1299 
1300 	return err;
1301 }
1302 
1303 static int igt_ggtt_fill(void *arg)
1304 {
1305 	return exercise_ggtt(arg, fill_hole);
1306 }
1307 
1308 static int igt_ggtt_walk(void *arg)
1309 {
1310 	return exercise_ggtt(arg, walk_hole);
1311 }
1312 
1313 static int igt_ggtt_pot(void *arg)
1314 {
1315 	return exercise_ggtt(arg, pot_hole);
1316 }
1317 
1318 static int igt_ggtt_drunk(void *arg)
1319 {
1320 	return exercise_ggtt(arg, drunk_hole);
1321 }
1322 
1323 static int igt_ggtt_lowlevel(void *arg)
1324 {
1325 	return exercise_ggtt(arg, lowlevel_hole);
1326 }
1327 
1328 static int igt_ggtt_misaligned_pin(void *arg)
1329 {
1330 	return exercise_ggtt(arg, misaligned_pin);
1331 }
1332 
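/*
 * Point every PTE of a GGTT range at the same physical page with
 * vm->insert_page(), then check that dword writes through one alias read
 * back correctly through another via the mappable aperture.
 */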
1333 static int igt_ggtt_page(void *arg)
1334 {
1335 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1336 	I915_RND_STATE(prng);
1337 	struct drm_i915_private *i915 = arg;
1338 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1339 	struct drm_i915_gem_object *obj;
1340 	intel_wakeref_t wakeref;
1341 	struct drm_mm_node tmp;
1342 	unsigned int *order, n;
1343 	int err;
1344 
1345 	if (!i915_ggtt_has_aperture(ggtt))
1346 		return 0;
1347 
1348 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1349 	if (IS_ERR(obj))
1350 		return PTR_ERR(obj);
1351 
1352 	err = i915_gem_object_pin_pages_unlocked(obj);
1353 	if (err)
1354 		goto out_free;
1355 
1356 	memset(&tmp, 0, sizeof(tmp));
1357 	mutex_lock(&ggtt->vm.mutex);
1358 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1359 					  count * PAGE_SIZE, 0,
1360 					  I915_COLOR_UNEVICTABLE,
1361 					  0, ggtt->mappable_end,
1362 					  DRM_MM_INSERT_LOW);
1363 	mutex_unlock(&ggtt->vm.mutex);
1364 	if (err)
1365 		goto out_unpin;
1366 
1367 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1368 
1369 	for (n = 0; n < count; n++) {
1370 		u64 offset = tmp.start + n * PAGE_SIZE;
1371 
1372 		ggtt->vm.insert_page(&ggtt->vm,
1373 				     i915_gem_object_get_dma_address(obj, 0),
1374 				     offset, I915_CACHE_NONE, 0);
1375 	}
1376 
1377 	order = i915_random_order(count, &prng);
1378 	if (!order) {
1379 		err = -ENOMEM;
1380 		goto out_remove;
1381 	}
1382 
1383 	for (n = 0; n < count; n++) {
1384 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1385 		u32 __iomem *vaddr;
1386 
1387 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1388 		iowrite32(n, vaddr + n);
1389 		io_mapping_unmap_atomic(vaddr);
1390 	}
1391 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1392 
1393 	i915_random_reorder(order, count, &prng);
1394 	for (n = 0; n < count; n++) {
1395 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1396 		u32 __iomem *vaddr;
1397 		u32 val;
1398 
1399 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1400 		val = ioread32(vaddr + n);
1401 		io_mapping_unmap_atomic(vaddr);
1402 
1403 		if (val != n) {
1404 			pr_err("insert page failed: found %d, expected %d\n",
1405 			       val, n);
1406 			err = -EINVAL;
1407 			break;
1408 		}
1409 	}
1410 
1411 	kfree(order);
1412 out_remove:
1413 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1414 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1415 	mutex_lock(&ggtt->vm.mutex);
1416 	drm_mm_remove_node(&tmp);
1417 	mutex_unlock(&ggtt->vm.mutex);
1418 out_unpin:
1419 	i915_gem_object_unpin_pages(obj);
1420 out_free:
1421 	i915_gem_object_put(obj);
1422 	return err;
1423 }
1424 
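/*
 * Fake the parts of i915_vma_bind() that we skip by inserting the node
 * directly: take page references, mark the vma as populated and move it
 * onto the VM's bound list.
 */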
1425 static void track_vma_bind(struct i915_vma *vma)
1426 {
1427 	struct drm_i915_gem_object *obj = vma->obj;
1428 
1429 	__i915_gem_object_pin_pages(obj);
1430 
1431 	GEM_BUG_ON(atomic_read(&vma->pages_count));
1432 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1433 	__i915_gem_object_pin_pages(obj);
1434 	vma->pages = obj->mm.pages;
1435 	vma->resource->bi.pages = vma->pages;
1436 
1437 	mutex_lock(&vma->vm->mutex);
1438 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1439 	mutex_unlock(&vma->vm->mutex);
1440 }
1441 
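/* Run a hole exerciser over a mock context's VM, capped at the size of RAM. */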
1442 static int exercise_mock(struct drm_i915_private *i915,
1443 			 int (*func)(struct i915_address_space *vm,
1444 				     u64 hole_start, u64 hole_end,
1445 				     unsigned long end_time))
1446 {
1447 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1448 	struct i915_address_space *vm;
1449 	struct i915_gem_context *ctx;
1450 	IGT_TIMEOUT(end_time);
1451 	int err;
1452 
1453 	ctx = mock_context(i915, "mock");
1454 	if (!ctx)
1455 		return -ENOMEM;
1456 
1457 	vm = i915_gem_context_get_eb_vm(ctx);
1458 	err = func(vm, 0, min(vm->total, limit), end_time);
1459 	i915_vm_put(vm);
1460 
1461 	mock_context_close(ctx);
1462 	return err;
1463 }
1464 
1465 static int igt_mock_fill(void *arg)
1466 {
1467 	struct i915_ggtt *ggtt = arg;
1468 
1469 	return exercise_mock(ggtt->vm.i915, fill_hole);
1470 }
1471 
1472 static int igt_mock_walk(void *arg)
1473 {
1474 	struct i915_ggtt *ggtt = arg;
1475 
1476 	return exercise_mock(ggtt->vm.i915, walk_hole);
1477 }
1478 
1479 static int igt_mock_pot(void *arg)
1480 {
1481 	struct i915_ggtt *ggtt = arg;
1482 
1483 	return exercise_mock(ggtt->vm.i915, pot_hole);
1484 }
1485 
1486 static int igt_mock_drunk(void *arg)
1487 {
1488 	struct i915_ggtt *ggtt = arg;
1489 
1490 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1491 }
1492 
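/*
 * Wrap i915_gem_gtt_reserve(), attaching the vma resource that the lower
 * layers expect on success.
 */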
1493 static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1494 {
1495 	struct i915_address_space *vm = vma->vm;
1496 	struct i915_vma_resource *vma_res;
1497 	struct drm_i915_gem_object *obj = vma->obj;
1498 	int err;
1499 
1500 	vma_res = i915_vma_resource_alloc();
1501 	if (IS_ERR(vma_res))
1502 		return PTR_ERR(vma_res);
1503 
1504 	mutex_lock(&vm->mutex);
1505 	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
1506 				   offset,
1507 				   obj->cache_level,
1508 				   0);
1509 	if (!err) {
1510 		i915_vma_resource_init_from_vma(vma_res, vma);
1511 		vma->resource = vma_res;
1512 	} else {
1513 		kfree(vma_res);
1514 	}
1515 	mutex_unlock(&vm->mutex);
1516 
1517 	return err;
1518 }
1519 
1520 static int igt_gtt_reserve(void *arg)
1521 {
1522 	struct i915_ggtt *ggtt = arg;
1523 	struct drm_i915_gem_object *obj, *on;
1524 	I915_RND_STATE(prng);
1525 	LIST_HEAD(objects);
1526 	u64 total;
1527 	int err = -ENODEV;
1528 
1529 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1530 	 * for the node, and evicts if it has to. So our test checks that
1531 	 * it can give us the requested space and prevent overlaps.
1532 	 */
1533 
1534 	/* Start by filling the GGTT */
1535 	for (total = 0;
1536 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1537 	     total += 2 * I915_GTT_PAGE_SIZE) {
1538 		struct i915_vma *vma;
1539 
1540 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1541 						      2 * PAGE_SIZE);
1542 		if (IS_ERR(obj)) {
1543 			err = PTR_ERR(obj);
1544 			goto out;
1545 		}
1546 
1547 		err = i915_gem_object_pin_pages_unlocked(obj);
1548 		if (err) {
1549 			i915_gem_object_put(obj);
1550 			goto out;
1551 		}
1552 
1553 		list_add(&obj->st_link, &objects);
1554 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1555 		if (IS_ERR(vma)) {
1556 			err = PTR_ERR(vma);
1557 			goto out;
1558 		}
1559 
1560 		err = reserve_gtt_with_resource(vma, total);
1561 		if (err) {
1562 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1563 			       total, ggtt->vm.total, err);
1564 			goto out;
1565 		}
1566 		track_vma_bind(vma);
1567 
1568 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1569 		if (vma->node.start != total ||
1570 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1571 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1572 			       vma->node.start, vma->node.size,
1573 			       total, 2*I915_GTT_PAGE_SIZE);
1574 			err = -EINVAL;
1575 			goto out;
1576 		}
1577 	}
1578 
1579 	/* Now we start forcing evictions */
1580 	for (total = I915_GTT_PAGE_SIZE;
1581 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1582 	     total += 2 * I915_GTT_PAGE_SIZE) {
1583 		struct i915_vma *vma;
1584 
1585 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1586 						      2 * PAGE_SIZE);
1587 		if (IS_ERR(obj)) {
1588 			err = PTR_ERR(obj);
1589 			goto out;
1590 		}
1591 
1592 		err = i915_gem_object_pin_pages_unlocked(obj);
1593 		if (err) {
1594 			i915_gem_object_put(obj);
1595 			goto out;
1596 		}
1597 
1598 		list_add(&obj->st_link, &objects);
1599 
1600 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1601 		if (IS_ERR(vma)) {
1602 			err = PTR_ERR(vma);
1603 			goto out;
1604 		}
1605 
1606 		err = reserve_gtt_with_resource(vma, total);
1607 		if (err) {
1608 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1609 			       total, ggtt->vm.total, err);
1610 			goto out;
1611 		}
1612 		track_vma_bind(vma);
1613 
1614 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1615 		if (vma->node.start != total ||
1616 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1617 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1618 			       vma->node.start, vma->node.size,
1619 			       total, 2*I915_GTT_PAGE_SIZE);
1620 			err = -EINVAL;
1621 			goto out;
1622 		}
1623 	}
1624 
1625 	/* And then try at random */
1626 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1627 		struct i915_vma *vma;
1628 		u64 offset;
1629 
1630 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1631 		if (IS_ERR(vma)) {
1632 			err = PTR_ERR(vma);
1633 			goto out;
1634 		}
1635 
1636 		err = i915_vma_unbind_unlocked(vma);
1637 		if (err) {
1638 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1639 			goto out;
1640 		}
1641 
1642 		offset = igt_random_offset(&prng,
1643 					   0, ggtt->vm.total,
1644 					   2 * I915_GTT_PAGE_SIZE,
1645 					   I915_GTT_MIN_ALIGNMENT);
1646 
1647 		err = reserve_gtt_with_resource(vma, offset);
1648 		if (err) {
1649 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1650 			       total, ggtt->vm.total, err);
1651 			goto out;
1652 		}
1653 		track_vma_bind(vma);
1654 
1655 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1656 		if (vma->node.start != offset ||
1657 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1658 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1659 			       vma->node.start, vma->node.size,
1660 			       offset, 2*I915_GTT_PAGE_SIZE);
1661 			err = -EINVAL;
1662 			goto out;
1663 		}
1664 	}
1665 
1666 out:
1667 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1668 		i915_gem_object_unpin_pages(obj);
1669 		i915_gem_object_put(obj);
1670 	}
1671 	return err;
1672 }
1673 
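/*
 * Wrap i915_gem_gtt_insert(), attaching the vma resource that the lower
 * layers expect on success.
 */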
1674 static int insert_gtt_with_resource(struct i915_vma *vma)
1675 {
1676 	struct i915_address_space *vm = vma->vm;
1677 	struct i915_vma_resource *vma_res;
1678 	struct drm_i915_gem_object *obj = vma->obj;
1679 	int err;
1680 
1681 	vma_res = i915_vma_resource_alloc();
1682 	if (IS_ERR(vma_res))
1683 		return PTR_ERR(vma_res);
1684 
1685 	mutex_lock(&vm->mutex);
1686 	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
1687 				  obj->cache_level, 0, vm->total, 0);
1688 	if (!err) {
1689 		i915_vma_resource_init_from_vma(vma_res, vma);
1690 		vma->resource = vma_res;
1691 	} else {
1692 		kfree(vma_res);
1693 	}
1694 	mutex_unlock(&vm->mutex);
1695 
1696 	return err;
1697 }
1698 
1699 static int igt_gtt_insert(void *arg)
1700 {
1701 	struct i915_ggtt *ggtt = arg;
1702 	struct drm_i915_gem_object *obj, *on;
1703 	struct drm_mm_node tmp = {};
1704 	const struct invalid_insert {
1705 		u64 size;
1706 		u64 alignment;
1707 		u64 start, end;
1708 	} invalid_insert[] = {
1709 		{
1710 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1711 			0, ggtt->vm.total,
1712 		},
1713 		{
1714 			2*I915_GTT_PAGE_SIZE, 0,
1715 			0, I915_GTT_PAGE_SIZE,
1716 		},
1717 		{
1718 			-(u64)I915_GTT_PAGE_SIZE, 0,
1719 			0, 4*I915_GTT_PAGE_SIZE,
1720 		},
1721 		{
1722 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1723 			0, 4*I915_GTT_PAGE_SIZE,
1724 		},
1725 		{
1726 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1727 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1728 		},
1729 		{}
1730 	}, *ii;
1731 	LIST_HEAD(objects);
1732 	u64 total;
1733 	int err = -ENODEV;
1734 
1735 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1736 	 * for the node, evicting if required.
1737 	 */
1738 
1739 	/* Check a couple of obviously invalid requests */
1740 	for (ii = invalid_insert; ii->size; ii++) {
1741 		mutex_lock(&ggtt->vm.mutex);
1742 		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
1743 					  ii->size, ii->alignment,
1744 					  I915_COLOR_UNEVICTABLE,
1745 					  ii->start, ii->end,
1746 					  0);
1747 		mutex_unlock(&ggtt->vm.mutex);
1748 		if (err != -ENOSPC) {
1749 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1750 			       ii->size, ii->alignment, ii->start, ii->end,
1751 			       err);
1752 			return -EINVAL;
1753 		}
1754 	}
1755 
1756 	/* Start by filling the GGTT */
1757 	for (total = 0;
1758 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1759 	     total += I915_GTT_PAGE_SIZE) {
1760 		struct i915_vma *vma;
1761 
1762 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1763 						      I915_GTT_PAGE_SIZE);
1764 		if (IS_ERR(obj)) {
1765 			err = PTR_ERR(obj);
1766 			goto out;
1767 		}
1768 
1769 		err = i915_gem_object_pin_pages_unlocked(obj);
1770 		if (err) {
1771 			i915_gem_object_put(obj);
1772 			goto out;
1773 		}
1774 
1775 		list_add(&obj->st_link, &objects);
1776 
1777 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1778 		if (IS_ERR(vma)) {
1779 			err = PTR_ERR(vma);
1780 			goto out;
1781 		}
1782 
1783 		err = insert_gtt_with_resource(vma);
1784 		if (err == -ENOSPC) {
1785 			/* maxed out the GGTT space */
1786 			i915_gem_object_put(obj);
1787 			break;
1788 		}
1789 		if (err) {
1790 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1791 			       total, ggtt->vm.total, err);
1792 			goto out;
1793 		}
1794 		track_vma_bind(vma);
1795 		__i915_vma_pin(vma);
1796 
1797 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1798 	}
1799 
1800 	list_for_each_entry(obj, &objects, st_link) {
1801 		struct i915_vma *vma;
1802 
1803 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1804 		if (IS_ERR(vma)) {
1805 			err = PTR_ERR(vma);
1806 			goto out;
1807 		}
1808 
1809 		if (!drm_mm_node_allocated(&vma->node)) {
1810 			pr_err("VMA was unexpectedly evicted!\n");
1811 			err = -EINVAL;
1812 			goto out;
1813 		}
1814 
1815 		__i915_vma_unpin(vma);
1816 	}
1817 
1818 	/* If we then reinsert, we should find the same hole */
1819 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1820 		struct i915_vma *vma;
1821 		u64 offset;
1822 
1823 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1824 		if (IS_ERR(vma)) {
1825 			err = PTR_ERR(vma);
1826 			goto out;
1827 		}
1828 
1829 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1830 		offset = vma->node.start;
1831 
1832 		err = i915_vma_unbind_unlocked(vma);
1833 		if (err) {
1834 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1835 			goto out;
1836 		}
1837 
1838 		err = insert_gtt_with_resource(vma);
1839 		if (err) {
1840 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1841 			       total, ggtt->vm.total, err);
1842 			goto out;
1843 		}
1844 		track_vma_bind(vma);
1845 
1846 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1847 		if (vma->node.start != offset) {
1848 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1849 			       offset, vma->node.start);
1850 			err = -EINVAL;
1851 			goto out;
1852 		}
1853 	}
1854 
1855 	/* And then force evictions */
1856 	for (total = 0;
1857 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1858 	     total += 2 * I915_GTT_PAGE_SIZE) {
1859 		struct i915_vma *vma;
1860 
1861 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1862 						      2 * I915_GTT_PAGE_SIZE);
1863 		if (IS_ERR(obj)) {
1864 			err = PTR_ERR(obj);
1865 			goto out;
1866 		}
1867 
1868 		err = i915_gem_object_pin_pages_unlocked(obj);
1869 		if (err) {
1870 			i915_gem_object_put(obj);
1871 			goto out;
1872 		}
1873 
1874 		list_add(&obj->st_link, &objects);
1875 
1876 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1877 		if (IS_ERR(vma)) {
1878 			err = PTR_ERR(vma);
1879 			goto out;
1880 		}
1881 
1882 		err = insert_gtt_with_resource(vma);
1883 		if (err) {
1884 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1885 			       total, ggtt->vm.total, err);
1886 			goto out;
1887 		}
1888 		track_vma_bind(vma);
1889 
1890 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1891 	}
1892 
1893 out:
1894 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1895 		i915_gem_object_unpin_pages(obj);
1896 		i915_gem_object_put(obj);
1897 	}
1898 	return err;
1899 }
1900 
1901 int i915_gem_gtt_mock_selftests(void)
1902 {
1903 	static const struct i915_subtest tests[] = {
1904 		SUBTEST(igt_mock_drunk),
1905 		SUBTEST(igt_mock_walk),
1906 		SUBTEST(igt_mock_pot),
1907 		SUBTEST(igt_mock_fill),
1908 		SUBTEST(igt_gtt_reserve),
1909 		SUBTEST(igt_gtt_insert),
1910 	};
1911 	struct drm_i915_private *i915;
1912 	struct intel_gt *gt;
1913 	int err;
1914 
1915 	i915 = mock_gem_device();
1916 	if (!i915)
1917 		return -ENOMEM;
1918 
1919 	/* allocate the ggtt */
1920 	err = intel_gt_assign_ggtt(to_gt(i915));
1921 	if (err)
1922 		goto out_put;
1923 
1924 	gt = to_gt(i915);
1925 
1926 	mock_init_ggtt(gt);
1927 
1928 	err = i915_subtests(tests, gt->ggtt);
1929 
1930 	mock_device_flush(i915);
1931 	i915_gem_drain_freed_objects(i915);
1932 	mock_fini_ggtt(gt->ggtt);
1933 
1934 out_put:
1935 	mock_destroy_device(i915);
1936 	return err;
1937 }
1938 
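/* Submit an empty request on @ce and wait (up to 200ms) for it to complete. */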
1939 static int context_sync(struct intel_context *ce)
1940 {
1941 	struct i915_request *rq;
1942 	long timeout;
1943 
1944 	rq = intel_context_create_request(ce);
1945 	if (IS_ERR(rq))
1946 		return PTR_ERR(rq);
1947 
1948 	i915_request_get(rq);
1949 	i915_request_add(rq);
1950 
1951 	timeout = i915_request_wait(rq, 0, HZ / 5);
1952 	i915_request_put(rq);
1953 
1954 	return timeout < 0 ? -EIO : 0;
1955 }
1956 
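/* Build and submit a request that starts executing the batch at @addr. */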
1957 static struct i915_request *
1958 submit_batch(struct intel_context *ce, u64 addr)
1959 {
1960 	struct i915_request *rq;
1961 	int err;
1962 
1963 	rq = intel_context_create_request(ce);
1964 	if (IS_ERR(rq))
1965 		return rq;
1966 
1967 	err = 0;
1968 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1969 		err = rq->engine->emit_init_breadcrumb(rq);
1970 	if (err == 0)
1971 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1972 
1973 	if (err == 0)
1974 		i915_request_get(rq);
1975 	i915_request_add(rq);
1976 
1977 	return err ? ERR_PTR(err) : rq;
1978 }
1979 
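/*
 * Each 64-byte slot of the store batch ends with a MI_BATCH_BUFFER_START;
 * spinner() returns the dword just before it, and end_spin() overwrites
 * that dword with MI_BATCH_BUFFER_END so execution stops there instead of
 * chaining on.
 */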
1980 static u32 *spinner(u32 *batch, int i)
1981 {
1982 	return batch + i * 64 / sizeof(*batch) + 4;
1983 }
1984 
1985 static void end_spin(u32 *batch, int i)
1986 {
1987 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1988 	wmb();
1989 }
1990 
1991 static int igt_cs_tlb(void *arg)
1992 {
1993 	const unsigned int count = PAGE_SIZE / 64;
1994 	const unsigned int chunk_size = count * PAGE_SIZE;
1995 	struct drm_i915_private *i915 = arg;
1996 	struct drm_i915_gem_object *bbe, *act, *out;
1997 	struct i915_gem_engines_iter it;
1998 	struct i915_address_space *vm;
1999 	struct i915_gem_context *ctx;
2000 	struct intel_context *ce;
2001 	struct i915_vma *vma;
2002 	I915_RND_STATE(prng);
2003 	struct file *file;
2004 	unsigned int i;
2005 	u32 *result;
2006 	u32 *batch;
2007 	int err = 0;
2008 
2009 	/*
2010 	 * Our mission here is to fool the hardware into executing something
2011 	 * from scratch, as it has not seen the batch move (due to a missing
2012 	 * TLB invalidate).
2013 	 */
2014 
2015 	file = mock_file(i915);
2016 	if (IS_ERR(file))
2017 		return PTR_ERR(file);
2018 
2019 	ctx = live_context(i915, file);
2020 	if (IS_ERR(ctx)) {
2021 		err = PTR_ERR(ctx);
2022 		goto out_unlock;
2023 	}
2024 
2025 	vm = i915_gem_context_get_eb_vm(ctx);
2026 	if (i915_is_ggtt(vm))
2027 		goto out_vm;
2028 
2029 	/* Create two pages: a dummy to prefill the TLB, and the intended batch */
2030 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
2031 	if (IS_ERR(bbe)) {
2032 		err = PTR_ERR(bbe);
2033 		goto out_vm;
2034 	}
2035 
2036 	batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
2037 	if (IS_ERR(batch)) {
2038 		err = PTR_ERR(batch);
2039 		goto out_put_bbe;
2040 	}
2041 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
2042 	i915_gem_object_flush_map(bbe);
2043 	i915_gem_object_unpin_map(bbe);
2044 
2045 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
2046 	if (IS_ERR(act)) {
2047 		err = PTR_ERR(act);
2048 		goto out_put_bbe;
2049 	}
2050 
2051 	/* Track the execution of each request by writing into a different slot */
2052 	batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
2053 	if (IS_ERR(batch)) {
2054 		err = PTR_ERR(batch);
2055 		goto out_put_act;
2056 	}
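	/*
	 * Each 64-byte slot stores its own index into the result page that
	 * is pinned at the very top of the address space, then jumps back
	 * on itself via MI_BATCH_BUFFER_START (the jump target is patched
	 * in just before submission).
	 */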
2057 	for (i = 0; i < count; i++) {
2058 		u32 *cs = batch + i * 64 / sizeof(*cs);
2059 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
2060 
2061 		GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
2062 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
2063 		if (GRAPHICS_VER(i915) >= 8) {
2064 			cs[1] = lower_32_bits(addr);
2065 			cs[2] = upper_32_bits(addr);
2066 			cs[3] = i;
2067 			cs[4] = MI_NOOP;
2068 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
2069 		} else {
2070 			cs[1] = 0;
2071 			cs[2] = lower_32_bits(addr);
2072 			cs[3] = i;
2073 			cs[4] = MI_NOOP;
2074 			cs[5] = MI_BATCH_BUFFER_START;
2075 		}
2076 	}
2077 
2078 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
2079 	if (IS_ERR(out)) {
2080 		err = PTR_ERR(out);
2081 		goto out_put_batch;
2082 	}
2083 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
2084 
2085 	vma = i915_vma_instance(out, vm, NULL);
2086 	if (IS_ERR(vma)) {
2087 		err = PTR_ERR(vma);
2088 		goto out_put_out;
2089 	}
2090 
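	/*
	 * Pin the result page at the very top of the address space, matching
	 * the store address baked into each batch slot above.
	 */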
2091 	err = i915_vma_pin(vma, 0, 0,
2092 			   PIN_USER |
2093 			   PIN_OFFSET_FIXED |
2094 			   (vm->total - PAGE_SIZE));
2095 	if (err)
2096 		goto out_put_out;
2097 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
2098 
2099 	result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
2100 	if (IS_ERR(result)) {
2101 		err = PTR_ERR(result);
2102 		goto out_put_out;
2103 	}
2104 
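	/*
	 * For each engine, repeatedly pick a random chunk of the ppGTT,
	 * prime the TLB by executing the dummy (immediately terminating)
	 * batch from every page of the chunk, then rebind the active
	 * batches over the same addresses and check that every store
	 * landed. A lost write means the engine fetched from the stale
	 * dummy mapping, i.e. a missing TLB invalidation.
	 */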
2105 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2106 		IGT_TIMEOUT(end_time);
2107 		unsigned long pass = 0;
2108 
2109 		if (!intel_engine_can_store_dword(ce->engine))
2110 			continue;
2111 
2112 		while (!__igt_timeout(end_time, NULL)) {
2113 			struct i915_vm_pt_stash stash = {};
2114 			struct i915_request *rq;
2115 			struct i915_gem_ww_ctx ww;
2116 			struct i915_vma_resource *vma_res;
2117 			u64 offset;
2118 
2119 			offset = igt_random_offset(&prng,
2120 						   0, vm->total - PAGE_SIZE,
2121 						   chunk_size, PAGE_SIZE);
2122 
2123 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
2124 
2125 			vma = i915_vma_instance(bbe, vm, NULL);
2126 			if (IS_ERR(vma)) {
2127 				err = PTR_ERR(vma);
2128 				goto end;
2129 			}
2130 
2131 			i915_gem_object_lock(bbe, NULL);
2132 			err = i915_vma_get_pages(vma);
2133 			i915_gem_object_unlock(bbe);
2134 			if (err)
2135 				goto end;
2136 
2137 			vma_res = i915_vma_resource_alloc();
2138 			if (IS_ERR(vma_res)) {
2139 				i915_vma_put_pages(vma);
2140 				err = PTR_ERR(vma_res);
2141 				goto end;
2142 			}
2143 
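			/*
			 * Preallocate and map the page tables covering the
			 * chunk under the ww lock, backing off and retrying
			 * on -EDEADLK.
			 */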
2144 			i915_gem_ww_ctx_init(&ww, false);
2145 retry:
2146 			err = i915_vm_lock_objects(vm, &ww);
2147 			if (err)
2148 				goto end_ww;
2149 
2150 			err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
2151 			if (err)
2152 				goto end_ww;
2153 
2154 			err = i915_vm_map_pt_stash(vm, &stash);
2155 			if (!err)
2156 				vm->allocate_va_range(vm, &stash, offset, chunk_size);
2157 			i915_vm_free_pt_stash(vm, &stash);
2158 end_ww:
2159 			if (err == -EDEADLK) {
2160 				err = i915_gem_ww_ctx_backoff(&ww);
2161 				if (!err)
2162 					goto retry;
2163 			}
2164 			i915_gem_ww_ctx_fini(&ww);
2165 			if (err) {
2166 				kfree(vma_res);
2167 				goto end;
2168 			}
2169 
2170 			i915_vma_resource_init_from_vma(vma_res, vma);
2171 			/* Prime the TLB with the dummy pages */
2172 			for (i = 0; i < count; i++) {
2173 				vma_res->start = offset + i * PAGE_SIZE;
2174 				vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
2175 						   0);
2176 
2177 				rq = submit_batch(ce, vma_res->start);
2178 				if (IS_ERR(rq)) {
2179 					err = PTR_ERR(rq);
2180 					i915_vma_resource_fini(vma_res);
2181 					kfree(vma_res);
2182 					goto end;
2183 				}
2184 				i915_request_put(rq);
2185 			}
2186 			i915_vma_resource_fini(vma_res);
2187 			i915_vma_put_pages(vma);
2188 
2189 			err = context_sync(ce);
2190 			if (err) {
2191 				pr_err("%s: dummy setup timed out\n",
2192 				       ce->engine->name);
2193 				kfree(vma_res);
2194 				goto end;
2195 			}
2196 
2197 			vma = i915_vma_instance(act, vm, NULL);
2198 			if (IS_ERR(vma)) {
2199 				kfree(vma_res);
2200 				err = PTR_ERR(vma);
2201 				goto end;
2202 			}
2203 
2204 			i915_gem_object_lock(act, NULL);
2205 			err = i915_vma_get_pages(vma);
2206 			i915_gem_object_unlock(act);
2207 			if (err) {
2208 				kfree(vma_res);
2209 				goto end;
2210 			}
2211 
2212 			i915_vma_resource_init_from_vma(vma_res, vma);
2213 			/* Replace the TLB with target batches */
2214 			for (i = 0; i < count; i++) {
2215 				struct i915_request *rq;
2216 				u32 *cs = batch + i * 64 / sizeof(*cs);
2217 				u64 addr;
2218 
2219 				vma_res->start = offset + i * PAGE_SIZE;
2220 				vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
2221 
2222 				addr = vma_res->start + i * 64;
2223 				cs[4] = MI_NOOP;
2224 				cs[6] = lower_32_bits(addr);
2225 				cs[7] = upper_32_bits(addr);
2226 				wmb();
2227 
2228 				rq = submit_batch(ce, addr);
2229 				if (IS_ERR(rq)) {
2230 					err = PTR_ERR(rq);
2231 					i915_vma_resource_fini(vma_res);
2232 					kfree(vma_res);
2233 					goto end;
2234 				}
2235 
2236 				/* Wait until the context chain has started */
2237 				if (i == 0) {
2238 					while (READ_ONCE(result[i]) &&
2239 					       !i915_request_completed(rq))
2240 						cond_resched();
2241 				} else {
2242 					end_spin(batch, i - 1);
2243 				}
2244 
2245 				i915_request_put(rq);
2246 			}
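			/* Release the final spinner so the chain can drain. */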
2247 			end_spin(batch, count - 1);
2248 
2249 			i915_vma_resource_fini(vma_res);
2250 			kfree(vma_res);
2251 			i915_vma_put_pages(vma);
2252 
2253 			err = context_sync(ce);
2254 			if (err) {
2255 				pr_err("%s: writes timed out\n",
2256 				       ce->engine->name);
2257 				goto end;
2258 			}
2259 
2260 			for (i = 0; i < count; i++) {
2261 				if (result[i] != i) {
2262 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2263 					       ce->engine->name, pass,
2264 					       offset, i, result[i], i);
2265 					err = -EINVAL;
2266 					goto end;
2267 				}
2268 			}
2269 
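			/* Return the chunk's PTEs to scratch before the next pass. */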
2270 			vm->clear_range(vm, offset, chunk_size);
2271 			pass++;
2272 		}
2273 	}
2274 end:
2275 	if (igt_flush_test(i915))
2276 		err = -EIO;
2277 	i915_gem_context_unlock_engines(ctx);
2278 	i915_gem_object_unpin_map(out);
2279 out_put_out:
2280 	i915_gem_object_put(out);
2281 out_put_batch:
2282 	i915_gem_object_unpin_map(act);
2283 out_put_act:
2284 	i915_gem_object_put(act);
2285 out_put_bbe:
2286 	i915_gem_object_put(bbe);
2287 out_vm:
2288 	i915_vm_put(vm);
2289 out_unlock:
2290 	fput(file);
2291 	return err;
2292 }
2293 
2294 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2295 {
2296 	static const struct i915_subtest tests[] = {
2297 		SUBTEST(igt_ppgtt_alloc),
2298 		SUBTEST(igt_ppgtt_lowlevel),
2299 		SUBTEST(igt_ppgtt_drunk),
2300 		SUBTEST(igt_ppgtt_walk),
2301 		SUBTEST(igt_ppgtt_pot),
2302 		SUBTEST(igt_ppgtt_fill),
2303 		SUBTEST(igt_ppgtt_shrink),
2304 		SUBTEST(igt_ppgtt_shrink_boom),
2305 		SUBTEST(igt_ppgtt_misaligned_pin),
2306 		SUBTEST(igt_ggtt_lowlevel),
2307 		SUBTEST(igt_ggtt_drunk),
2308 		SUBTEST(igt_ggtt_walk),
2309 		SUBTEST(igt_ggtt_pot),
2310 		SUBTEST(igt_ggtt_fill),
2311 		SUBTEST(igt_ggtt_page),
2312 		SUBTEST(igt_ggtt_misaligned_pin),
2313 		SUBTEST(igt_cs_tlb),
2314 	};
2315 
2316 	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
2317 
2318 	return i915_live_subtests(tests, i915);
2319 }
2320