1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/i915_gem_internal.h"
30 #include "gem/i915_gem_lmem.h"
31 #include "gem/i915_gem_region.h"
32 #include "gem/selftests/mock_context.h"
33 #include "gt/intel_context.h"
34 #include "gt/intel_gpu_commands.h"
35 #include "gt/intel_gtt.h"
36 
37 #include "i915_random.h"
38 #include "i915_selftest.h"
39 #include "i915_vma_resource.h"
40 
41 #include "mock_drm.h"
42 #include "mock_gem_device.h"
43 #include "mock_gtt.h"
44 #include "igt_flush_test.h"
45 
46 static void cleanup_freed_objects(struct drm_i915_private *i915)
47 {
48 	i915_gem_drain_freed_objects(i915);
49 }
50 
51 static void fake_free_pages(struct drm_i915_gem_object *obj,
52 			    struct sg_table *pages)
53 {
54 	sg_free_table(pages);
55 	kfree(pages);
56 }
57 
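/*
 * Fake backing store for test objects: every sg entry points at the same
 * biased pfn, in chunks of up to 2GiB, so arbitrarily large objects can be
 * "allocated" without consuming real memory. Only the GTT side of these
 * objects is ever exercised.
 */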
58 static int fake_get_pages(struct drm_i915_gem_object *obj)
59 {
60 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
61 #define PFN_BIAS 0x1000
62 	struct sg_table *pages;
63 	struct scatterlist *sg;
64 	typeof(obj->base.size) rem;
65 
66 	pages = kmalloc(sizeof(*pages), GFP);
67 	if (!pages)
68 		return -ENOMEM;
69 
70 	rem = round_up(obj->base.size, BIT(31)) >> 31;
	/* restricted by sg_alloc_table */
	if (overflows_type(rem, unsigned int)) {
		kfree(pages);
		return -E2BIG;
	}
74 
75 	if (sg_alloc_table(pages, rem, GFP)) {
76 		kfree(pages);
77 		return -ENOMEM;
78 	}
79 
80 	rem = obj->base.size;
81 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
82 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
83 
84 		GEM_BUG_ON(!len);
85 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
86 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
87 		sg_dma_len(sg) = len;
88 
89 		rem -= len;
90 	}
91 	GEM_BUG_ON(rem);
92 
93 	__i915_gem_object_set_pages(obj, pages);
94 
95 	return 0;
96 #undef GFP
97 }
98 
99 static void fake_put_pages(struct drm_i915_gem_object *obj,
100 			   struct sg_table *pages)
101 {
102 	fake_free_pages(obj, pages);
103 	obj->mm.dirty = false;
104 }
105 
106 static const struct drm_i915_gem_object_ops fake_ops = {
107 	.name = "fake-gem",
108 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
109 	.get_pages = fake_get_pages,
110 	.put_pages = fake_put_pages,
111 };
112 
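/*
 * Create a volatile, CPU-domain GEM object backed by the fake sg machinery
 * above, so that VMAs of (almost) any size can be bound into the GTT without
 * allocating real backing pages.
 */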
113 static struct drm_i915_gem_object *
114 fake_dma_object(struct drm_i915_private *i915, u64 size)
115 {
116 	static struct lock_class_key lock_class;
117 	struct drm_i915_gem_object *obj;
118 
119 	GEM_BUG_ON(!size);
120 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
121 
122 	if (overflows_type(size, obj->base.size))
123 		return ERR_PTR(-E2BIG);
124 
125 	obj = i915_gem_object_alloc();
126 	if (!obj)
127 		goto err;
128 
129 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
130 	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
131 
132 	i915_gem_object_set_volatile(obj);
133 
134 	obj->write_domain = I915_GEM_DOMAIN_CPU;
135 	obj->read_domains = I915_GEM_DOMAIN_CPU;
136 	obj->cache_level = I915_CACHE_NONE;
137 
138 	/* Preallocate the "backing storage" */
139 	if (i915_gem_object_pin_pages_unlocked(obj))
140 		goto err_obj;
141 
142 	i915_gem_object_unpin_pages(obj);
143 	return obj;
144 
145 err_obj:
146 	i915_gem_object_put(obj);
147 err:
148 	return ERR_PTR(-ENOMEM);
149 }
150 
151 static int igt_ppgtt_alloc(void *arg)
152 {
153 	struct drm_i915_private *dev_priv = arg;
154 	struct i915_ppgtt *ppgtt;
155 	struct i915_gem_ww_ctx ww;
156 	u64 size, last, limit;
157 	int err = 0;
158 
	/* Allocate a ppgtt and try to fill the entire range */
160 
161 	if (!HAS_PPGTT(dev_priv))
162 		return 0;
163 
164 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
165 	if (IS_ERR(ppgtt))
166 		return PTR_ERR(ppgtt);
167 
168 	if (!ppgtt->vm.allocate_va_range)
169 		goto err_ppgtt_cleanup;
170 
	/*
	 * Although we only allocate the page tables here, and so could
	 * address a much larger GTT than would actually fit into RAM,
	 * a practical limit is the number of physical pages in the system.
	 * This should ensure that we do not run into the oomkiller during
	 * the test and take down the machine wilfully.
	 */
178 	limit = totalram_pages() << PAGE_SHIFT;
179 	limit = min(ppgtt->vm.total, limit);
180 
181 	i915_gem_ww_ctx_init(&ww, false);
182 retry:
183 	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
184 	if (err)
185 		goto err_ppgtt_cleanup;
186 
187 	/* Check we can allocate the entire range */
188 	for (size = 4096; size <= limit; size <<= 2) {
189 		struct i915_vm_pt_stash stash = {};
190 
191 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
192 		if (err)
193 			goto err_ppgtt_cleanup;
194 
195 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
196 		if (err) {
197 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
198 			goto err_ppgtt_cleanup;
199 		}
200 
201 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
202 		cond_resched();
203 
204 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
205 
206 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
207 	}
208 
209 	/* Check we can incrementally allocate the entire range */
210 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
211 		struct i915_vm_pt_stash stash = {};
212 
213 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
214 		if (err)
215 			goto err_ppgtt_cleanup;
216 
217 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
218 		if (err) {
219 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
220 			goto err_ppgtt_cleanup;
221 		}
222 
223 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
224 					    last, size - last);
225 		cond_resched();
226 
227 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
228 	}
229 
230 err_ppgtt_cleanup:
231 	if (err == -EDEADLK) {
232 		err = i915_gem_ww_ctx_backoff(&ww);
233 		if (!err)
234 			goto retry;
235 	}
236 	i915_gem_ww_ctx_fini(&ww);
237 
238 	i915_vm_put(&ppgtt->vm);
239 	return err;
240 }
241 
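/*
 * Exercise the low-level GTT hooks directly: allocate the page directories
 * with allocate_va_range(), then insert and clear PTEs for a mock vma
 * resource at random offsets within the hole, using ever larger objects
 * until one no longer fits.
 */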
242 static int lowlevel_hole(struct i915_address_space *vm,
243 			 u64 hole_start, u64 hole_end,
244 			 unsigned long end_time)
245 {
246 	const unsigned int min_alignment =
247 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
248 	I915_RND_STATE(seed_prng);
249 	struct i915_vma_resource *mock_vma_res;
250 	unsigned int size;
251 
252 	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
253 	if (!mock_vma_res)
254 		return -ENOMEM;
255 
256 	/* Keep creating larger objects until one cannot fit into the hole */
257 	for (size = 12; (hole_end - hole_start) >> size; size++) {
258 		I915_RND_SUBSTATE(prng, seed_prng);
259 		struct drm_i915_gem_object *obj;
260 		unsigned int *order, count, n;
261 		u64 hole_size, aligned_size;
262 
263 		aligned_size = max_t(u32, ilog2(min_alignment), size);
264 		hole_size = (hole_end - hole_start) >> aligned_size;
265 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
266 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
267 		count = hole_size >> 1;
268 		if (!count) {
269 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
270 				 __func__, hole_start, hole_end, size, hole_size);
271 			break;
272 		}
273 
274 		do {
275 			order = i915_random_order(count, &prng);
276 			if (order)
277 				break;
278 		} while (count >>= 1);
279 		if (!count) {
280 			kfree(mock_vma_res);
281 			return -ENOMEM;
282 		}
283 		GEM_BUG_ON(!order);
284 
285 		GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
286 		GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
287 
		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
293 
294 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
295 		if (IS_ERR(obj)) {
296 			kfree(order);
297 			break;
298 		}
299 
300 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
301 
302 		if (i915_gem_object_pin_pages_unlocked(obj)) {
303 			i915_gem_object_put(obj);
304 			kfree(order);
305 			break;
306 		}
307 
308 		for (n = 0; n < count; n++) {
309 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
310 			intel_wakeref_t wakeref;
311 
312 			GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
313 
314 			if (igt_timeout(end_time,
315 					"%s timed out before %d/%d\n",
316 					__func__, n, count)) {
317 				hole_end = hole_start; /* quit */
318 				break;
319 			}
320 
321 			if (vm->allocate_va_range) {
322 				struct i915_vm_pt_stash stash = {};
323 				struct i915_gem_ww_ctx ww;
324 				int err;
325 
326 				i915_gem_ww_ctx_init(&ww, false);
327 retry:
328 				err = i915_vm_lock_objects(vm, &ww);
329 				if (err)
330 					goto alloc_vm_end;
331 
332 				err = -ENOMEM;
333 				if (i915_vm_alloc_pt_stash(vm, &stash,
334 							   BIT_ULL(size)))
335 					goto alloc_vm_end;
336 
337 				err = i915_vm_map_pt_stash(vm, &stash);
338 				if (!err)
339 					vm->allocate_va_range(vm, &stash,
340 							      addr, BIT_ULL(size));
341 				i915_vm_free_pt_stash(vm, &stash);
342 alloc_vm_end:
343 				if (err == -EDEADLK) {
344 					err = i915_gem_ww_ctx_backoff(&ww);
345 					if (!err)
346 						goto retry;
347 				}
348 				i915_gem_ww_ctx_fini(&ww);
349 
350 				if (err)
351 					break;
352 			}
353 
354 			mock_vma_res->bi.pages = obj->mm.pages;
355 			mock_vma_res->node_size = BIT_ULL(aligned_size);
356 			mock_vma_res->start = addr;
357 
			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
				vm->insert_entries(vm, mock_vma_res,
						   I915_CACHE_NONE, 0);
361 		}
362 		count = n;
363 
364 		i915_random_reorder(order, count, &prng);
365 		for (n = 0; n < count; n++) {
366 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
367 			intel_wakeref_t wakeref;
368 
369 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
370 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
371 				vm->clear_range(vm, addr, BIT_ULL(size));
372 		}
373 
374 		i915_gem_object_unpin_pages(obj);
375 		i915_gem_object_put(obj);
376 
377 		kfree(order);
378 
379 		cleanup_freed_objects(vm->i915);
380 	}
381 
382 	kfree(mock_vma_res);
383 	return 0;
384 }
385 
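/*
 * Unbind (ignoring any error) each object's vma in this vm and drop the
 * list's references to the objects.
 */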
386 static void close_object_list(struct list_head *objects,
387 			      struct i915_address_space *vm)
388 {
389 	struct drm_i915_gem_object *obj, *on;
390 	int ignored;
391 
392 	list_for_each_entry_safe(obj, on, objects, st_link) {
393 		struct i915_vma *vma;
394 
395 		vma = i915_vma_instance(obj, vm, NULL);
396 		if (!IS_ERR(vma))
397 			ignored = i915_vma_unbind_unlocked(vma);
398 
399 		list_del(&obj->st_link);
400 		i915_gem_object_put(obj);
401 	}
402 }
403 
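/*
 * Fill the hole with a growing list of objects whose page counts follow
 * prime-number strides, packing them against either end of the hole
 * (top-down and bottom-up) at explicit offsets, and verify each vma lands
 * exactly where requested and stays there until unbound.
 */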
404 static int fill_hole(struct i915_address_space *vm,
405 		     u64 hole_start, u64 hole_end,
406 		     unsigned long end_time)
407 {
408 	const u64 hole_size = hole_end - hole_start;
409 	struct drm_i915_gem_object *obj;
410 	const unsigned int min_alignment =
411 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
412 	const unsigned long max_pages =
413 		min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
414 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
415 	unsigned long npages, prime, flags;
416 	struct i915_vma *vma;
417 	LIST_HEAD(objects);
418 	int err;
419 
420 	/* Try binding many VMA working inwards from either edge */
421 
422 	flags = PIN_OFFSET_FIXED | PIN_USER;
423 	if (i915_is_ggtt(vm))
424 		flags |= PIN_GLOBAL;
425 
426 	for_each_prime_number_from(prime, 2, max_step) {
427 		for (npages = 1; npages <= max_pages; npages *= prime) {
428 			const u64 full_size = npages << PAGE_SHIFT;
429 			const struct {
430 				const char *name;
431 				u64 offset;
432 				int step;
433 			} phases[] = {
434 				{ "top-down", hole_end, -1, },
435 				{ "bottom-up", hole_start, 1, },
436 				{ }
437 			}, *p;
438 
439 			obj = fake_dma_object(vm->i915, full_size);
440 			if (IS_ERR(obj))
441 				break;
442 
443 			list_add(&obj->st_link, &objects);
444 
			/*
			 * Align objects of differing sizes against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
449 			for (p = phases; p->name; p++) {
450 				u64 offset;
451 
452 				offset = p->offset;
453 				list_for_each_entry(obj, &objects, st_link) {
454 					u64 aligned_size = round_up(obj->base.size,
455 								    min_alignment);
456 
457 					vma = i915_vma_instance(obj, vm, NULL);
458 					if (IS_ERR(vma))
459 						continue;
460 
461 					if (p->step < 0) {
462 						if (offset < hole_start + aligned_size)
463 							break;
464 						offset -= aligned_size;
465 					}
466 
467 					err = i915_vma_pin(vma, 0, 0, offset | flags);
468 					if (err) {
469 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
470 						       __func__, p->name, err, npages, prime, offset);
471 						goto err;
472 					}
473 
474 					if (!drm_mm_node_allocated(&vma->node) ||
475 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
476 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
477 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
478 						       offset);
479 						err = -EINVAL;
480 						goto err;
481 					}
482 
483 					i915_vma_unpin(vma);
484 
485 					if (p->step > 0) {
486 						if (offset + aligned_size > hole_end)
487 							break;
488 						offset += aligned_size;
489 					}
490 				}
491 
492 				offset = p->offset;
493 				list_for_each_entry(obj, &objects, st_link) {
494 					u64 aligned_size = round_up(obj->base.size,
495 								    min_alignment);
496 
497 					vma = i915_vma_instance(obj, vm, NULL);
498 					if (IS_ERR(vma))
499 						continue;
500 
501 					if (p->step < 0) {
502 						if (offset < hole_start + aligned_size)
503 							break;
504 						offset -= aligned_size;
505 					}
506 
507 					if (!drm_mm_node_allocated(&vma->node) ||
508 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
509 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
510 						       __func__, p->name, vma->node.start, vma->node.size,
511 						       offset);
512 						err = -EINVAL;
513 						goto err;
514 					}
515 
516 					err = i915_vma_unbind_unlocked(vma);
517 					if (err) {
518 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
519 						       __func__, p->name, vma->node.start, vma->node.size,
520 						       err);
521 						goto err;
522 					}
523 
524 					if (p->step > 0) {
525 						if (offset + aligned_size > hole_end)
526 							break;
527 						offset += aligned_size;
528 					}
529 				}
530 
531 				offset = p->offset;
532 				list_for_each_entry_reverse(obj, &objects, st_link) {
533 					u64 aligned_size = round_up(obj->base.size,
534 								    min_alignment);
535 
536 					vma = i915_vma_instance(obj, vm, NULL);
537 					if (IS_ERR(vma))
538 						continue;
539 
540 					if (p->step < 0) {
541 						if (offset < hole_start + aligned_size)
542 							break;
543 						offset -= aligned_size;
544 					}
545 
546 					err = i915_vma_pin(vma, 0, 0, offset | flags);
547 					if (err) {
548 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
549 						       __func__, p->name, err, npages, prime, offset);
550 						goto err;
551 					}
552 
553 					if (!drm_mm_node_allocated(&vma->node) ||
554 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
555 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
556 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
557 						       offset);
558 						err = -EINVAL;
559 						goto err;
560 					}
561 
562 					i915_vma_unpin(vma);
563 
564 					if (p->step > 0) {
565 						if (offset + aligned_size > hole_end)
566 							break;
567 						offset += aligned_size;
568 					}
569 				}
570 
571 				offset = p->offset;
572 				list_for_each_entry_reverse(obj, &objects, st_link) {
573 					u64 aligned_size = round_up(obj->base.size,
574 								    min_alignment);
575 
576 					vma = i915_vma_instance(obj, vm, NULL);
577 					if (IS_ERR(vma))
578 						continue;
579 
580 					if (p->step < 0) {
581 						if (offset < hole_start + aligned_size)
582 							break;
583 						offset -= aligned_size;
584 					}
585 
586 					if (!drm_mm_node_allocated(&vma->node) ||
587 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
588 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
589 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
590 						       offset);
591 						err = -EINVAL;
592 						goto err;
593 					}
594 
595 					err = i915_vma_unbind_unlocked(vma);
596 					if (err) {
597 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
598 						       __func__, p->name, vma->node.start, vma->node.size,
599 						       err);
600 						goto err;
601 					}
602 
603 					if (p->step > 0) {
604 						if (offset + aligned_size > hole_end)
605 							break;
606 						offset += aligned_size;
607 					}
608 				}
609 			}
610 
611 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
612 					__func__, npages, prime)) {
613 				err = -EINTR;
614 				goto err;
615 			}
616 		}
617 
618 		close_object_list(&objects, vm);
619 		cleanup_freed_objects(vm->i915);
620 	}
621 
622 	return 0;
623 
624 err:
625 	close_object_list(&objects, vm);
626 	return err;
627 }
628 
629 static int walk_hole(struct i915_address_space *vm,
630 		     u64 hole_start, u64 hole_end,
631 		     unsigned long end_time)
632 {
633 	const u64 hole_size = hole_end - hole_start;
634 	const unsigned long max_pages =
635 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
636 	unsigned long min_alignment;
637 	unsigned long flags;
638 	u64 size;
639 
640 	/* Try binding a single VMA in different positions within the hole */
641 
642 	flags = PIN_OFFSET_FIXED | PIN_USER;
643 	if (i915_is_ggtt(vm))
644 		flags |= PIN_GLOBAL;
645 
646 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
647 
648 	for_each_prime_number_from(size, 1, max_pages) {
649 		struct drm_i915_gem_object *obj;
650 		struct i915_vma *vma;
651 		u64 addr;
652 		int err = 0;
653 
654 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
655 		if (IS_ERR(obj))
656 			break;
657 
658 		vma = i915_vma_instance(obj, vm, NULL);
659 		if (IS_ERR(vma)) {
660 			err = PTR_ERR(vma);
661 			goto err_put;
662 		}
663 
664 		for (addr = hole_start;
665 		     addr + obj->base.size < hole_end;
666 		     addr += round_up(obj->base.size, min_alignment)) {
667 			err = i915_vma_pin(vma, 0, 0, addr | flags);
668 			if (err) {
669 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
670 				       __func__, addr, vma->size,
671 				       hole_start, hole_end, err);
672 				goto err_put;
673 			}
674 			i915_vma_unpin(vma);
675 
676 			if (!drm_mm_node_allocated(&vma->node) ||
677 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
678 				pr_err("%s incorrect at %llx + %llx\n",
679 				       __func__, addr, vma->size);
680 				err = -EINVAL;
681 				goto err_put;
682 			}
683 
684 			err = i915_vma_unbind_unlocked(vma);
685 			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
687 				       __func__, addr, vma->size, err);
688 				goto err_put;
689 			}
690 
691 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
692 
693 			if (igt_timeout(end_time,
694 					"%s timed out at %llx\n",
695 					__func__, addr)) {
696 				err = -EINTR;
697 				goto err_put;
698 			}
699 		}
700 
701 err_put:
702 		i915_gem_object_put(obj);
703 		if (err)
704 			return err;
705 
706 		cleanup_freed_objects(vm->i915);
707 	}
708 
709 	return 0;
710 }
711 
712 static int pot_hole(struct i915_address_space *vm,
713 		    u64 hole_start, u64 hole_end,
714 		    unsigned long end_time)
715 {
716 	struct drm_i915_gem_object *obj;
717 	struct i915_vma *vma;
718 	unsigned int min_alignment;
719 	unsigned long flags;
720 	unsigned int pot;
721 	int err = 0;
722 
723 	flags = PIN_OFFSET_FIXED | PIN_USER;
724 	if (i915_is_ggtt(vm))
725 		flags |= PIN_GLOBAL;
726 
727 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
728 
729 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
730 	if (IS_ERR(obj))
731 		return PTR_ERR(obj);
732 
733 	vma = i915_vma_instance(obj, vm, NULL);
734 	if (IS_ERR(vma)) {
735 		err = PTR_ERR(vma);
736 		goto err_obj;
737 	}
738 
739 	/* Insert a pair of pages across every pot boundary within the hole */
740 	for (pot = fls64(hole_end - 1) - 1;
741 	     pot > ilog2(2 * min_alignment);
742 	     pot--) {
743 		u64 step = BIT_ULL(pot);
744 		u64 addr;
745 
746 		for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
747 		     hole_end > addr && hole_end - addr >= 2 * min_alignment;
748 		     addr += step) {
749 			err = i915_vma_pin(vma, 0, 0, addr | flags);
750 			if (err) {
751 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
752 				       __func__,
753 				       addr,
754 				       hole_start, hole_end,
755 				       err);
756 				goto err_obj;
757 			}
758 
759 			if (!drm_mm_node_allocated(&vma->node) ||
760 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
761 				pr_err("%s incorrect at %llx + %llx\n",
762 				       __func__, addr, vma->size);
763 				i915_vma_unpin(vma);
764 				err = i915_vma_unbind_unlocked(vma);
765 				err = -EINVAL;
766 				goto err_obj;
767 			}
768 
769 			i915_vma_unpin(vma);
770 			err = i915_vma_unbind_unlocked(vma);
771 			GEM_BUG_ON(err);
772 		}
773 
774 		if (igt_timeout(end_time,
775 				"%s timed out after %d/%d\n",
776 				__func__, pot, fls64(hole_end - 1) - 1)) {
777 			err = -EINTR;
778 			goto err_obj;
779 		}
780 	}
781 
782 err_obj:
783 	i915_gem_object_put(obj);
784 	return err;
785 }
786 
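/*
 * Bind a single object at every possible (aligned) offset within the hole,
 * visiting those offsets in random order, and double the object size each
 * pass until it no longer fits.
 */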
787 static int drunk_hole(struct i915_address_space *vm,
788 		      u64 hole_start, u64 hole_end,
789 		      unsigned long end_time)
790 {
791 	I915_RND_STATE(prng);
792 	unsigned int min_alignment;
793 	unsigned int size;
794 	unsigned long flags;
795 
796 	flags = PIN_OFFSET_FIXED | PIN_USER;
797 	if (i915_is_ggtt(vm))
798 		flags |= PIN_GLOBAL;
799 
800 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
801 
802 	/* Keep creating larger objects until one cannot fit into the hole */
803 	for (size = 12; (hole_end - hole_start) >> size; size++) {
804 		struct drm_i915_gem_object *obj;
805 		unsigned int *order, count, n;
806 		struct i915_vma *vma;
807 		u64 hole_size, aligned_size;
808 		int err = -ENODEV;
809 
810 		aligned_size = max_t(u32, ilog2(min_alignment), size);
811 		hole_size = (hole_end - hole_start) >> aligned_size;
812 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
813 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
814 		count = hole_size >> 1;
815 		if (!count) {
816 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
817 				 __func__, hole_start, hole_end, size, hole_size);
818 			break;
819 		}
820 
821 		do {
822 			order = i915_random_order(count, &prng);
823 			if (order)
824 				break;
825 		} while (count >>= 1);
826 		if (!count)
827 			return -ENOMEM;
828 		GEM_BUG_ON(!order);
829 
		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
835 
836 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
837 		if (IS_ERR(obj)) {
838 			kfree(order);
839 			break;
840 		}
841 
842 		vma = i915_vma_instance(obj, vm, NULL);
843 		if (IS_ERR(vma)) {
844 			err = PTR_ERR(vma);
845 			goto err_obj;
846 		}
847 
848 		GEM_BUG_ON(vma->size != BIT_ULL(size));
849 
850 		for (n = 0; n < count; n++) {
851 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
852 
853 			err = i915_vma_pin(vma, 0, 0, addr | flags);
854 			if (err) {
855 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
856 				       __func__,
857 				       addr, BIT_ULL(size),
858 				       hole_start, hole_end,
859 				       err);
860 				goto err_obj;
861 			}
862 
863 			if (!drm_mm_node_allocated(&vma->node) ||
864 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
865 				pr_err("%s incorrect at %llx + %llx\n",
866 				       __func__, addr, BIT_ULL(size));
867 				i915_vma_unpin(vma);
868 				err = i915_vma_unbind_unlocked(vma);
869 				err = -EINVAL;
870 				goto err_obj;
871 			}
872 
873 			i915_vma_unpin(vma);
874 			err = i915_vma_unbind_unlocked(vma);
875 			GEM_BUG_ON(err);
876 
877 			if (igt_timeout(end_time,
878 					"%s timed out after %d/%d\n",
879 					__func__, n, count)) {
880 				err = -EINTR;
881 				goto err_obj;
882 			}
883 		}
884 
885 err_obj:
886 		i915_gem_object_put(obj);
887 		kfree(order);
888 		if (err)
889 			return err;
890 
891 		cleanup_freed_objects(vm->i915);
892 	}
893 
894 	return 0;
895 }
896 
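/*
 * Pack the hole with progressively larger objects while the vm's fault
 * injection (configured by the caller) forces page-table allocations to
 * fail, checking that such failures are handled gracefully.
 */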
897 static int __shrink_hole(struct i915_address_space *vm,
898 			 u64 hole_start, u64 hole_end,
899 			 unsigned long end_time)
900 {
901 	struct drm_i915_gem_object *obj;
902 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
903 	unsigned int min_alignment;
904 	unsigned int order = 12;
905 	LIST_HEAD(objects);
906 	int err = 0;
907 	u64 addr;
908 
909 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
910 
911 	/* Keep creating larger objects until one cannot fit into the hole */
912 	for (addr = hole_start; addr < hole_end; ) {
913 		struct i915_vma *vma;
914 		u64 size = BIT_ULL(order++);
915 
916 		size = min(size, hole_end - addr);
917 		obj = fake_dma_object(vm->i915, size);
918 		if (IS_ERR(obj)) {
919 			err = PTR_ERR(obj);
920 			break;
921 		}
922 
923 		list_add(&obj->st_link, &objects);
924 
925 		vma = i915_vma_instance(obj, vm, NULL);
926 		if (IS_ERR(vma)) {
927 			err = PTR_ERR(vma);
928 			break;
929 		}
930 
931 		GEM_BUG_ON(vma->size != size);
932 
933 		err = i915_vma_pin(vma, 0, 0, addr | flags);
934 		if (err) {
935 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
936 			       __func__, addr, size, hole_start, hole_end, err);
937 			break;
938 		}
939 
940 		if (!drm_mm_node_allocated(&vma->node) ||
941 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
942 			pr_err("%s incorrect at %llx + %llx\n",
943 			       __func__, addr, size);
944 			i915_vma_unpin(vma);
945 			err = i915_vma_unbind_unlocked(vma);
946 			err = -EINVAL;
947 			break;
948 		}
949 
950 		i915_vma_unpin(vma);
951 		addr += round_up(size, min_alignment);
952 
953 		/*
954 		 * Since we are injecting allocation faults at random intervals,
955 		 * wait for this allocation to complete before we change the
		 * fault injection.
957 		 */
958 		err = i915_vma_sync(vma);
959 		if (err)
960 			break;
961 
962 		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
964 				__func__, addr, hole_start, hole_end)) {
965 			err = -EINTR;
966 			break;
967 		}
968 	}
969 
970 	close_object_list(&objects, vm);
971 	cleanup_freed_objects(vm->i915);
972 	return err;
973 }
974 
975 static int shrink_hole(struct i915_address_space *vm,
976 		       u64 hole_start, u64 hole_end,
977 		       unsigned long end_time)
978 {
979 	unsigned long prime;
980 	int err;
981 
982 	vm->fault_attr.probability = 999;
983 	atomic_set(&vm->fault_attr.times, -1);
984 
985 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
986 		vm->fault_attr.interval = prime;
987 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
988 		if (err)
989 			break;
990 	}
991 
992 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
993 
994 	return err;
995 }
996 
997 static int shrink_boom(struct i915_address_space *vm,
998 		       u64 hole_start, u64 hole_end,
999 		       unsigned long end_time)
1000 {
1001 	unsigned int sizes[] = { SZ_2M, SZ_1G };
1002 	struct drm_i915_gem_object *purge;
1003 	struct drm_i915_gem_object *explode;
1004 	int err;
1005 	int i;
1006 
1007 	/*
1008 	 * Catch the case which shrink_hole seems to miss. The setup here
1009 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
1011 	 * unpinned at the time.
1012 	 */
1013 
1014 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
1015 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1016 		unsigned int size = sizes[i];
1017 		struct i915_vma *vma;
1018 
1019 		purge = fake_dma_object(vm->i915, size);
1020 		if (IS_ERR(purge))
1021 			return PTR_ERR(purge);
1022 
1023 		vma = i915_vma_instance(purge, vm, NULL);
1024 		if (IS_ERR(vma)) {
1025 			err = PTR_ERR(vma);
1026 			goto err_purge;
1027 		}
1028 
1029 		err = i915_vma_pin(vma, 0, 0, flags);
1030 		if (err)
1031 			goto err_purge;
1032 
1033 		/* Should now be ripe for purging */
1034 		i915_vma_unpin(vma);
1035 
1036 		explode = fake_dma_object(vm->i915, size);
1037 		if (IS_ERR(explode)) {
1038 			err = PTR_ERR(explode);
1039 			goto err_purge;
1040 		}
1041 
1042 		vm->fault_attr.probability = 100;
1043 		vm->fault_attr.interval = 1;
1044 		atomic_set(&vm->fault_attr.times, -1);
1045 
1046 		vma = i915_vma_instance(explode, vm, NULL);
1047 		if (IS_ERR(vma)) {
1048 			err = PTR_ERR(vma);
1049 			goto err_explode;
1050 		}
1051 
1052 		err = i915_vma_pin(vma, 0, 0, flags | size);
1053 		if (err)
1054 			goto err_explode;
1055 
1056 		i915_vma_unpin(vma);
1057 
1058 		i915_gem_object_put(purge);
1059 		i915_gem_object_put(explode);
1060 
1061 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1062 		cleanup_freed_objects(vm->i915);
1063 	}
1064 
1065 	return 0;
1066 
1067 err_explode:
1068 	i915_gem_object_put(explode);
1069 err_purge:
1070 	i915_gem_object_put(purge);
1071 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1072 	return err;
1073 }
1074 
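/*
 * Pin an object from the given memory region at an explicit offset and check
 * that the resulting vma and drm_mm node are sized according to the region's
 * minimum page size expectations.
 */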
1075 static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
1076 			   u64 addr, u64 size, unsigned long flags)
1077 {
1078 	struct drm_i915_gem_object *obj;
1079 	struct i915_vma *vma;
1080 	int err = 0;
1081 	u64 expected_vma_size, expected_node_size;
1082 	bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
1083 			 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
1084 
1085 	obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
1086 	if (IS_ERR(obj)) {
1087 		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
1088 		if (PTR_ERR(obj) == -ENODEV && is_stolen)
1089 			return 0;
1090 		return PTR_ERR(obj);
1091 	}
1092 
1093 	vma = i915_vma_instance(obj, vm, NULL);
1094 	if (IS_ERR(vma)) {
1095 		err = PTR_ERR(vma);
1096 		goto err_put;
1097 	}
1098 
1099 	err = i915_vma_pin(vma, 0, 0, addr | flags);
1100 	if (err)
1101 		goto err_put;
1102 	i915_vma_unpin(vma);
1103 
1104 	if (!drm_mm_node_allocated(&vma->node)) {
1105 		err = -EINVAL;
1106 		goto err_put;
1107 	}
1108 
1109 	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1110 		err = -EINVAL;
1111 		goto err_put;
1112 	}
1113 
1114 	expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
1115 	expected_node_size = expected_vma_size;
1116 
1117 	if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
1118 		expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1119 		expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1120 	}
1121 
1122 	if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
1123 		err = i915_vma_unbind_unlocked(vma);
1124 		err = -EBADSLT;
1125 		goto err_put;
1126 	}
1127 
1128 	err = i915_vma_unbind_unlocked(vma);
1129 	if (err)
1130 		goto err_put;
1131 
1132 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1133 
1134 err_put:
1135 	i915_gem_object_put(obj);
1136 	cleanup_freed_objects(vm->i915);
1137 	return err;
1138 }
1139 
1140 static int misaligned_pin(struct i915_address_space *vm,
1141 			  u64 hole_start, u64 hole_end,
1142 			  unsigned long end_time)
1143 {
1144 	struct intel_memory_region *mr;
1145 	enum intel_region_id id;
1146 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1147 	int err = 0;
1148 	u64 hole_size = hole_end - hole_start;
1149 
1150 	if (i915_is_ggtt(vm))
1151 		flags |= PIN_GLOBAL;
1152 
1153 	for_each_memory_region(mr, vm->i915, id) {
1154 		u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
1155 		u64 size = min_alignment;
1156 		u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
1157 
1158 		/* avoid -ENOSPC on very small hole setups */
1159 		if (hole_size < 3 * min_alignment)
1160 			continue;
1161 
1162 		/* we can't test < 4k alignment due to flags being encoded in lower bits */
1163 		if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
1164 			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
			/* misaligned should error with -EINVAL */
1166 			if (!err)
1167 				err = -EBADSLT;
1168 			if (err != -EINVAL)
1169 				return err;
1170 		}
1171 
1172 		/* test for vma->size expansion to min page size */
1173 		err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
1174 		if (err)
1175 			return err;
1176 
1177 		/* test for intermediate size not expanding vma->size for large alignments */
1178 		err = misaligned_case(vm, mr, addr, size / 2, flags);
1179 		if (err)
1180 			return err;
1181 	}
1182 
1183 	return 0;
1184 }
1185 
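/*
 * Run the hole tester over the whole range of a freshly created full ppGTT,
 * so that every test starts from an empty address space.
 */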
1186 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1187 			  int (*func)(struct i915_address_space *vm,
1188 				      u64 hole_start, u64 hole_end,
1189 				      unsigned long end_time))
1190 {
1191 	struct i915_ppgtt *ppgtt;
1192 	IGT_TIMEOUT(end_time);
1193 	struct file *file;
1194 	int err;
1195 
1196 	if (!HAS_FULL_PPGTT(dev_priv))
1197 		return 0;
1198 
1199 	file = mock_file(dev_priv);
1200 	if (IS_ERR(file))
1201 		return PTR_ERR(file);
1202 
1203 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1204 	if (IS_ERR(ppgtt)) {
1205 		err = PTR_ERR(ppgtt);
1206 		goto out_free;
1207 	}
1208 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1209 	assert_vm_alive(&ppgtt->vm);
1210 
1211 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1212 
1213 	i915_vm_put(&ppgtt->vm);
1214 
1215 out_free:
1216 	fput(file);
1217 	return err;
1218 }
1219 
1220 static int igt_ppgtt_fill(void *arg)
1221 {
1222 	return exercise_ppgtt(arg, fill_hole);
1223 }
1224 
1225 static int igt_ppgtt_walk(void *arg)
1226 {
1227 	return exercise_ppgtt(arg, walk_hole);
1228 }
1229 
1230 static int igt_ppgtt_pot(void *arg)
1231 {
1232 	return exercise_ppgtt(arg, pot_hole);
1233 }
1234 
1235 static int igt_ppgtt_drunk(void *arg)
1236 {
1237 	return exercise_ppgtt(arg, drunk_hole);
1238 }
1239 
1240 static int igt_ppgtt_lowlevel(void *arg)
1241 {
1242 	return exercise_ppgtt(arg, lowlevel_hole);
1243 }
1244 
1245 static int igt_ppgtt_shrink(void *arg)
1246 {
1247 	return exercise_ppgtt(arg, shrink_hole);
1248 }
1249 
1250 static int igt_ppgtt_shrink_boom(void *arg)
1251 {
1252 	return exercise_ppgtt(arg, shrink_boom);
1253 }
1254 
1255 static int igt_ppgtt_misaligned_pin(void *arg)
1256 {
1257 	return exercise_ppgtt(arg, misaligned_pin);
1258 }
1259 
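/* Comparator for list_sort(): order drm_mm holes by ascending start address. */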
1260 static int sort_holes(void *priv, const struct list_head *A,
1261 		      const struct list_head *B)
1262 {
1263 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1264 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1265 
1266 	if (a->start < b->start)
1267 		return -1;
1268 	else
1269 		return 1;
1270 }
1271 
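/*
 * Run the hole tester over every hole in the live GGTT. As the test may
 * modify the drm_mm, re-sort and rescan the hole list after each hole.
 */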
1272 static int exercise_ggtt(struct drm_i915_private *i915,
1273 			 int (*func)(struct i915_address_space *vm,
1274 				     u64 hole_start, u64 hole_end,
1275 				     unsigned long end_time))
1276 {
1277 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1278 	u64 hole_start, hole_end, last = 0;
1279 	struct drm_mm_node *node;
1280 	IGT_TIMEOUT(end_time);
1281 	int err = 0;
1282 
1283 restart:
1284 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1285 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1286 		if (hole_start < last)
1287 			continue;
1288 
1289 		if (ggtt->vm.mm.color_adjust)
1290 			ggtt->vm.mm.color_adjust(node, 0,
1291 						 &hole_start, &hole_end);
1292 		if (hole_start >= hole_end)
1293 			continue;
1294 
1295 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1296 		if (err)
1297 			break;
1298 
1299 		/* As we have manipulated the drm_mm, the list may be corrupt */
1300 		last = hole_end;
1301 		goto restart;
1302 	}
1303 
1304 	return err;
1305 }
1306 
1307 static int igt_ggtt_fill(void *arg)
1308 {
1309 	return exercise_ggtt(arg, fill_hole);
1310 }
1311 
1312 static int igt_ggtt_walk(void *arg)
1313 {
1314 	return exercise_ggtt(arg, walk_hole);
1315 }
1316 
1317 static int igt_ggtt_pot(void *arg)
1318 {
1319 	return exercise_ggtt(arg, pot_hole);
1320 }
1321 
1322 static int igt_ggtt_drunk(void *arg)
1323 {
1324 	return exercise_ggtt(arg, drunk_hole);
1325 }
1326 
1327 static int igt_ggtt_lowlevel(void *arg)
1328 {
1329 	return exercise_ggtt(arg, lowlevel_hole);
1330 }
1331 
1332 static int igt_ggtt_misaligned_pin(void *arg)
1333 {
1334 	return exercise_ggtt(arg, misaligned_pin);
1335 }
1336 
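/*
 * Check ggtt->vm.insert_page(): point many GGTT PTEs at a single backing
 * page and verify that values written through the mappable aperture at each
 * offset, in random order, read back correctly.
 */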
1337 static int igt_ggtt_page(void *arg)
1338 {
1339 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1340 	I915_RND_STATE(prng);
1341 	struct drm_i915_private *i915 = arg;
1342 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1343 	struct drm_i915_gem_object *obj;
1344 	intel_wakeref_t wakeref;
1345 	struct drm_mm_node tmp;
1346 	unsigned int *order, n;
1347 	int err;
1348 
1349 	if (!i915_ggtt_has_aperture(ggtt))
1350 		return 0;
1351 
1352 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1353 	if (IS_ERR(obj))
1354 		return PTR_ERR(obj);
1355 
1356 	err = i915_gem_object_pin_pages_unlocked(obj);
1357 	if (err)
1358 		goto out_free;
1359 
1360 	memset(&tmp, 0, sizeof(tmp));
1361 	mutex_lock(&ggtt->vm.mutex);
1362 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1363 					  count * PAGE_SIZE, 0,
1364 					  I915_COLOR_UNEVICTABLE,
1365 					  0, ggtt->mappable_end,
1366 					  DRM_MM_INSERT_LOW);
1367 	mutex_unlock(&ggtt->vm.mutex);
1368 	if (err)
1369 		goto out_unpin;
1370 
1371 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1372 
1373 	for (n = 0; n < count; n++) {
1374 		u64 offset = tmp.start + n * PAGE_SIZE;
1375 
1376 		ggtt->vm.insert_page(&ggtt->vm,
1377 				     i915_gem_object_get_dma_address(obj, 0),
1378 				     offset, I915_CACHE_NONE, 0);
1379 	}
1380 
1381 	order = i915_random_order(count, &prng);
1382 	if (!order) {
1383 		err = -ENOMEM;
1384 		goto out_remove;
1385 	}
1386 
1387 	for (n = 0; n < count; n++) {
1388 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1389 		u32 __iomem *vaddr;
1390 
1391 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1392 		iowrite32(n, vaddr + n);
1393 		io_mapping_unmap_atomic(vaddr);
1394 	}
1395 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1396 
1397 	i915_random_reorder(order, count, &prng);
1398 	for (n = 0; n < count; n++) {
1399 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1400 		u32 __iomem *vaddr;
1401 		u32 val;
1402 
1403 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1404 		val = ioread32(vaddr + n);
1405 		io_mapping_unmap_atomic(vaddr);
1406 
1407 		if (val != n) {
1408 			pr_err("insert page failed: found %d, expected %d\n",
1409 			       val, n);
1410 			err = -EINVAL;
1411 			break;
1412 		}
1413 	}
1414 
1415 	kfree(order);
1416 out_remove:
1417 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1418 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1419 	mutex_lock(&ggtt->vm.mutex);
1420 	drm_mm_remove_node(&tmp);
1421 	mutex_unlock(&ggtt->vm.mutex);
1422 out_unpin:
1423 	i915_gem_object_unpin_pages(obj);
1424 out_free:
1425 	i915_gem_object_put(obj);
1426 	return err;
1427 }
1428 
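/*
 * The mock GTT never binds anything for real, so fake the bookkeeping that a
 * binding would normally perform: pin the backing pages, mark the vma as
 * having pages and move it onto the vm's bound list.
 */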
1429 static void track_vma_bind(struct i915_vma *vma)
1430 {
1431 	struct drm_i915_gem_object *obj = vma->obj;
1432 
1433 	__i915_gem_object_pin_pages(obj);
1434 
1435 	GEM_BUG_ON(atomic_read(&vma->pages_count));
1436 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1437 	__i915_gem_object_pin_pages(obj);
1438 	vma->pages = obj->mm.pages;
1439 	vma->resource->bi.pages = vma->pages;
1440 
1441 	mutex_lock(&vma->vm->mutex);
1442 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1443 	mutex_unlock(&vma->vm->mutex);
1444 }
1445 
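/*
 * Run the hole tester over the default vm of a mock context, capping the
 * range at the amount of system RAM to keep the fake allocations sane.
 */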
1446 static int exercise_mock(struct drm_i915_private *i915,
1447 			 int (*func)(struct i915_address_space *vm,
1448 				     u64 hole_start, u64 hole_end,
1449 				     unsigned long end_time))
1450 {
1451 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1452 	struct i915_address_space *vm;
1453 	struct i915_gem_context *ctx;
1454 	IGT_TIMEOUT(end_time);
1455 	int err;
1456 
1457 	ctx = mock_context(i915, "mock");
1458 	if (!ctx)
1459 		return -ENOMEM;
1460 
1461 	vm = i915_gem_context_get_eb_vm(ctx);
1462 	err = func(vm, 0, min(vm->total, limit), end_time);
1463 	i915_vm_put(vm);
1464 
1465 	mock_context_close(ctx);
1466 	return err;
1467 }
1468 
1469 static int igt_mock_fill(void *arg)
1470 {
1471 	struct i915_ggtt *ggtt = arg;
1472 
1473 	return exercise_mock(ggtt->vm.i915, fill_hole);
1474 }
1475 
1476 static int igt_mock_walk(void *arg)
1477 {
1478 	struct i915_ggtt *ggtt = arg;
1479 
1480 	return exercise_mock(ggtt->vm.i915, walk_hole);
1481 }
1482 
1483 static int igt_mock_pot(void *arg)
1484 {
1485 	struct i915_ggtt *ggtt = arg;
1486 
1487 	return exercise_mock(ggtt->vm.i915, pot_hole);
1488 }
1489 
1490 static int igt_mock_drunk(void *arg)
1491 {
1492 	struct i915_ggtt *ggtt = arg;
1493 
1494 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1495 }
1496 
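/*
 * Helper wrapping i915_gem_gtt_reserve(): take the vm mutex, reserve the node
 * at the requested offset and, on success, attach a freshly allocated vma
 * resource to the vma.
 */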
1497 static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1498 {
1499 	struct i915_address_space *vm = vma->vm;
1500 	struct i915_vma_resource *vma_res;
1501 	struct drm_i915_gem_object *obj = vma->obj;
1502 	int err;
1503 
1504 	vma_res = i915_vma_resource_alloc();
1505 	if (IS_ERR(vma_res))
1506 		return PTR_ERR(vma_res);
1507 
1508 	mutex_lock(&vm->mutex);
1509 	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
1510 				   offset,
1511 				   obj->cache_level,
1512 				   0);
1513 	if (!err) {
1514 		i915_vma_resource_init_from_vma(vma_res, vma);
1515 		vma->resource = vma_res;
1516 	} else {
1517 		kfree(vma_res);
1518 	}
1519 	mutex_unlock(&vm->mutex);
1520 
1521 	return err;
1522 }
1523 
1524 static int igt_gtt_reserve(void *arg)
1525 {
1526 	struct i915_ggtt *ggtt = arg;
1527 	struct drm_i915_gem_object *obj, *on;
1528 	I915_RND_STATE(prng);
1529 	LIST_HEAD(objects);
1530 	u64 total;
1531 	int err = -ENODEV;
1532 
	/*
	 * i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */
1537 
1538 	/* Start by filling the GGTT */
1539 	for (total = 0;
1540 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1541 	     total += 2 * I915_GTT_PAGE_SIZE) {
1542 		struct i915_vma *vma;
1543 
1544 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1545 						      2 * PAGE_SIZE);
1546 		if (IS_ERR(obj)) {
1547 			err = PTR_ERR(obj);
1548 			goto out;
1549 		}
1550 
1551 		err = i915_gem_object_pin_pages_unlocked(obj);
1552 		if (err) {
1553 			i915_gem_object_put(obj);
1554 			goto out;
1555 		}
1556 
1557 		list_add(&obj->st_link, &objects);
1558 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1559 		if (IS_ERR(vma)) {
1560 			err = PTR_ERR(vma);
1561 			goto out;
1562 		}
1563 
1564 		err = reserve_gtt_with_resource(vma, total);
1565 		if (err) {
1566 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1567 			       total, ggtt->vm.total, err);
1568 			goto out;
1569 		}
1570 		track_vma_bind(vma);
1571 
1572 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1573 		if (vma->node.start != total ||
1574 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1575 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1576 			       vma->node.start, vma->node.size,
1577 			       total, 2*I915_GTT_PAGE_SIZE);
1578 			err = -EINVAL;
1579 			goto out;
1580 		}
1581 	}
1582 
1583 	/* Now we start forcing evictions */
1584 	for (total = I915_GTT_PAGE_SIZE;
1585 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1586 	     total += 2 * I915_GTT_PAGE_SIZE) {
1587 		struct i915_vma *vma;
1588 
1589 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1590 						      2 * PAGE_SIZE);
1591 		if (IS_ERR(obj)) {
1592 			err = PTR_ERR(obj);
1593 			goto out;
1594 		}
1595 
1596 		err = i915_gem_object_pin_pages_unlocked(obj);
1597 		if (err) {
1598 			i915_gem_object_put(obj);
1599 			goto out;
1600 		}
1601 
1602 		list_add(&obj->st_link, &objects);
1603 
1604 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1605 		if (IS_ERR(vma)) {
1606 			err = PTR_ERR(vma);
1607 			goto out;
1608 		}
1609 
1610 		err = reserve_gtt_with_resource(vma, total);
1611 		if (err) {
1612 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1613 			       total, ggtt->vm.total, err);
1614 			goto out;
1615 		}
1616 		track_vma_bind(vma);
1617 
1618 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1619 		if (vma->node.start != total ||
1620 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1621 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1622 			       vma->node.start, vma->node.size,
1623 			       total, 2*I915_GTT_PAGE_SIZE);
1624 			err = -EINVAL;
1625 			goto out;
1626 		}
1627 	}
1628 
1629 	/* And then try at random */
1630 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1631 		struct i915_vma *vma;
1632 		u64 offset;
1633 
1634 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1635 		if (IS_ERR(vma)) {
1636 			err = PTR_ERR(vma);
1637 			goto out;
1638 		}
1639 
1640 		err = i915_vma_unbind_unlocked(vma);
1641 		if (err) {
1642 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1643 			goto out;
1644 		}
1645 
1646 		offset = igt_random_offset(&prng,
1647 					   0, ggtt->vm.total,
1648 					   2 * I915_GTT_PAGE_SIZE,
1649 					   I915_GTT_MIN_ALIGNMENT);
1650 
1651 		err = reserve_gtt_with_resource(vma, offset);
1652 		if (err) {
1653 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1654 			       total, ggtt->vm.total, err);
1655 			goto out;
1656 		}
1657 		track_vma_bind(vma);
1658 
1659 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1660 		if (vma->node.start != offset ||
1661 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1662 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1663 			       vma->node.start, vma->node.size,
1664 			       offset, 2*I915_GTT_PAGE_SIZE);
1665 			err = -EINVAL;
1666 			goto out;
1667 		}
1668 	}
1669 
1670 out:
1671 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1672 		i915_gem_object_unpin_pages(obj);
1673 		i915_gem_object_put(obj);
1674 	}
1675 	return err;
1676 }
1677 
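/*
 * Helper wrapping i915_gem_gtt_insert(): as above, but let the allocator pick
 * any suitable offset within the vm.
 */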
1678 static int insert_gtt_with_resource(struct i915_vma *vma)
1679 {
1680 	struct i915_address_space *vm = vma->vm;
1681 	struct i915_vma_resource *vma_res;
1682 	struct drm_i915_gem_object *obj = vma->obj;
1683 	int err;
1684 
1685 	vma_res = i915_vma_resource_alloc();
1686 	if (IS_ERR(vma_res))
1687 		return PTR_ERR(vma_res);
1688 
1689 	mutex_lock(&vm->mutex);
1690 	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
1691 				  obj->cache_level, 0, vm->total, 0);
1692 	if (!err) {
1693 		i915_vma_resource_init_from_vma(vma_res, vma);
1694 		vma->resource = vma_res;
1695 	} else {
1696 		kfree(vma_res);
1697 	}
1698 	mutex_unlock(&vm->mutex);
1699 
1700 	return err;
1701 }
1702 
1703 static int igt_gtt_insert(void *arg)
1704 {
1705 	struct i915_ggtt *ggtt = arg;
1706 	struct drm_i915_gem_object *obj, *on;
1707 	struct drm_mm_node tmp = {};
1708 	const struct invalid_insert {
1709 		u64 size;
1710 		u64 alignment;
1711 		u64 start, end;
1712 	} invalid_insert[] = {
1713 		{
1714 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1715 			0, ggtt->vm.total,
1716 		},
1717 		{
1718 			2*I915_GTT_PAGE_SIZE, 0,
1719 			0, I915_GTT_PAGE_SIZE,
1720 		},
1721 		{
1722 			-(u64)I915_GTT_PAGE_SIZE, 0,
1723 			0, 4*I915_GTT_PAGE_SIZE,
1724 		},
1725 		{
1726 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1727 			0, 4*I915_GTT_PAGE_SIZE,
1728 		},
1729 		{
1730 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1731 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1732 		},
1733 		{}
1734 	}, *ii;
1735 	LIST_HEAD(objects);
1736 	u64 total;
1737 	int err = -ENODEV;
1738 
	/*
	 * i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */
1742 
1743 	/* Check a couple of obviously invalid requests */
1744 	for (ii = invalid_insert; ii->size; ii++) {
1745 		mutex_lock(&ggtt->vm.mutex);
1746 		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
1747 					  ii->size, ii->alignment,
1748 					  I915_COLOR_UNEVICTABLE,
1749 					  ii->start, ii->end,
1750 					  0);
1751 		mutex_unlock(&ggtt->vm.mutex);
1752 		if (err != -ENOSPC) {
1753 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1754 			       ii->size, ii->alignment, ii->start, ii->end,
1755 			       err);
1756 			return -EINVAL;
1757 		}
1758 	}
1759 
1760 	/* Start by filling the GGTT */
1761 	for (total = 0;
1762 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1763 	     total += I915_GTT_PAGE_SIZE) {
1764 		struct i915_vma *vma;
1765 
1766 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1767 						      I915_GTT_PAGE_SIZE);
1768 		if (IS_ERR(obj)) {
1769 			err = PTR_ERR(obj);
1770 			goto out;
1771 		}
1772 
1773 		err = i915_gem_object_pin_pages_unlocked(obj);
1774 		if (err) {
1775 			i915_gem_object_put(obj);
1776 			goto out;
1777 		}
1778 
1779 		list_add(&obj->st_link, &objects);
1780 
1781 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1782 		if (IS_ERR(vma)) {
1783 			err = PTR_ERR(vma);
1784 			goto out;
1785 		}
1786 
1787 		err = insert_gtt_with_resource(vma);
1788 		if (err == -ENOSPC) {
1789 			/* maxed out the GGTT space */
1790 			i915_gem_object_put(obj);
1791 			break;
1792 		}
1793 		if (err) {
1794 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1795 			       total, ggtt->vm.total, err);
1796 			goto out;
1797 		}
1798 		track_vma_bind(vma);
1799 		__i915_vma_pin(vma);
1800 
1801 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1802 	}
1803 
1804 	list_for_each_entry(obj, &objects, st_link) {
1805 		struct i915_vma *vma;
1806 
1807 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1808 		if (IS_ERR(vma)) {
1809 			err = PTR_ERR(vma);
1810 			goto out;
1811 		}
1812 
1813 		if (!drm_mm_node_allocated(&vma->node)) {
1814 			pr_err("VMA was unexpectedly evicted!\n");
1815 			err = -EINVAL;
1816 			goto out;
1817 		}
1818 
1819 		__i915_vma_unpin(vma);
1820 	}
1821 
1822 	/* If we then reinsert, we should find the same hole */
1823 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1824 		struct i915_vma *vma;
1825 		u64 offset;
1826 
1827 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1828 		if (IS_ERR(vma)) {
1829 			err = PTR_ERR(vma);
1830 			goto out;
1831 		}
1832 
1833 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1834 		offset = vma->node.start;
1835 
1836 		err = i915_vma_unbind_unlocked(vma);
1837 		if (err) {
1838 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1839 			goto out;
1840 		}
1841 
1842 		err = insert_gtt_with_resource(vma);
1843 		if (err) {
1844 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1845 			       total, ggtt->vm.total, err);
1846 			goto out;
1847 		}
1848 		track_vma_bind(vma);
1849 
1850 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1851 		if (vma->node.start != offset) {
1852 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1853 			       offset, vma->node.start);
1854 			err = -EINVAL;
1855 			goto out;
1856 		}
1857 	}
1858 
1859 	/* And then force evictions */
1860 	for (total = 0;
1861 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1862 	     total += 2 * I915_GTT_PAGE_SIZE) {
1863 		struct i915_vma *vma;
1864 
1865 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1866 						      2 * I915_GTT_PAGE_SIZE);
1867 		if (IS_ERR(obj)) {
1868 			err = PTR_ERR(obj);
1869 			goto out;
1870 		}
1871 
1872 		err = i915_gem_object_pin_pages_unlocked(obj);
1873 		if (err) {
1874 			i915_gem_object_put(obj);
1875 			goto out;
1876 		}
1877 
1878 		list_add(&obj->st_link, &objects);
1879 
1880 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1881 		if (IS_ERR(vma)) {
1882 			err = PTR_ERR(vma);
1883 			goto out;
1884 		}
1885 
1886 		err = insert_gtt_with_resource(vma);
1887 		if (err) {
1888 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1889 			       total, ggtt->vm.total, err);
1890 			goto out;
1891 		}
1892 		track_vma_bind(vma);
1893 
1894 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1895 	}
1896 
1897 out:
1898 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1899 		i915_gem_object_unpin_pages(obj);
1900 		i915_gem_object_put(obj);
1901 	}
1902 	return err;
1903 }
1904 
1905 int i915_gem_gtt_mock_selftests(void)
1906 {
1907 	static const struct i915_subtest tests[] = {
1908 		SUBTEST(igt_mock_drunk),
1909 		SUBTEST(igt_mock_walk),
1910 		SUBTEST(igt_mock_pot),
1911 		SUBTEST(igt_mock_fill),
1912 		SUBTEST(igt_gtt_reserve),
1913 		SUBTEST(igt_gtt_insert),
1914 	};
1915 	struct drm_i915_private *i915;
1916 	struct intel_gt *gt;
1917 	int err;
1918 
1919 	i915 = mock_gem_device();
1920 	if (!i915)
1921 		return -ENOMEM;
1922 
1923 	/* allocate the ggtt */
1924 	err = intel_gt_assign_ggtt(to_gt(i915));
1925 	if (err)
1926 		goto out_put;
1927 
1928 	gt = to_gt(i915);
1929 
1930 	mock_init_ggtt(gt);
1931 
1932 	err = i915_subtests(tests, gt->ggtt);
1933 
1934 	mock_device_flush(i915);
1935 	i915_gem_drain_freed_objects(i915);
1936 	mock_fini_ggtt(gt->ggtt);
1937 
1938 out_put:
1939 	mock_destroy_device(i915);
1940 	return err;
1941 }
1942 
1943 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1944 {
1945 	static const struct i915_subtest tests[] = {
1946 		SUBTEST(igt_ppgtt_alloc),
1947 		SUBTEST(igt_ppgtt_lowlevel),
1948 		SUBTEST(igt_ppgtt_drunk),
1949 		SUBTEST(igt_ppgtt_walk),
1950 		SUBTEST(igt_ppgtt_pot),
1951 		SUBTEST(igt_ppgtt_fill),
1952 		SUBTEST(igt_ppgtt_shrink),
1953 		SUBTEST(igt_ppgtt_shrink_boom),
1954 		SUBTEST(igt_ppgtt_misaligned_pin),
1955 		SUBTEST(igt_ggtt_lowlevel),
1956 		SUBTEST(igt_ggtt_drunk),
1957 		SUBTEST(igt_ggtt_walk),
1958 		SUBTEST(igt_ggtt_pot),
1959 		SUBTEST(igt_ggtt_fill),
1960 		SUBTEST(igt_ggtt_page),
1961 		SUBTEST(igt_ggtt_misaligned_pin),
1962 	};
1963 
1964 	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
1965 
1966 	return i915_live_subtests(tests, i915);
1967 }
1968