1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/i915_gem_internal.h"
30 #include "gem/i915_gem_lmem.h"
31 #include "gem/i915_gem_region.h"
32 #include "gem/selftests/mock_context.h"
33 #include "gt/intel_context.h"
34 #include "gt/intel_gpu_commands.h"
35 #include "gt/intel_gtt.h"
36 
37 #include "i915_random.h"
38 #include "i915_selftest.h"
39 #include "i915_vma_resource.h"
40 
41 #include "mock_drm.h"
42 #include "mock_gem_device.h"
43 #include "mock_gtt.h"
44 #include "igt_flush_test.h"
45 
46 static void cleanup_freed_objects(struct drm_i915_private *i915)
47 {
48 	i915_gem_drain_freed_objects(i915);
49 }
50 
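/*
 * The fake_* helpers below provide "backing storage" without allocating
 * any real memory: every scatterlist segment points at the same bogus
 * page (PFN_BIAS) and the DMA address is simply that page's physical
 * address. This lets the tests exercise GTT ranges far larger than the
 * available RAM, provided nothing ever reads or writes through the
 * resulting mappings.
 */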
51 static void fake_free_pages(struct drm_i915_gem_object *obj,
52 			    struct sg_table *pages)
53 {
54 	sg_free_table(pages);
55 	kfree(pages);
56 }
57 
58 static int fake_get_pages(struct drm_i915_gem_object *obj)
59 {
60 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
61 #define PFN_BIAS 0x1000
62 	struct sg_table *pages;
63 	struct scatterlist *sg;
64 	typeof(obj->base.size) rem;
65 
66 	pages = kmalloc(sizeof(*pages), GFP);
67 	if (!pages)
68 		return -ENOMEM;
69 
70 	rem = round_up(obj->base.size, BIT(31)) >> 31;
71 	/* restricted by sg_alloc_table */
72 	if (overflows_type(rem, unsigned int)) {
73 		kfree(pages);
74 		return -E2BIG;
75 	}
76 
77 	if (sg_alloc_table(pages, rem, GFP)) {
78 		kfree(pages);
79 		return -ENOMEM;
80 	}
81 
82 	rem = obj->base.size;
83 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
84 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
85 
86 		GEM_BUG_ON(!len);
87 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
88 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
89 		sg_dma_len(sg) = len;
90 
91 		rem -= len;
92 	}
93 	GEM_BUG_ON(rem);
94 
95 	__i915_gem_object_set_pages(obj, pages);
96 
97 	return 0;
98 #undef GFP
99 }
100 
101 static void fake_put_pages(struct drm_i915_gem_object *obj,
102 			   struct sg_table *pages)
103 {
104 	fake_free_pages(obj, pages);
105 	obj->mm.dirty = false;
106 }
107 
108 static const struct drm_i915_gem_object_ops fake_ops = {
109 	.name = "fake-gem",
110 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
111 	.get_pages = fake_get_pages,
112 	.put_pages = fake_put_pages,
113 };
114 
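/*
 * Create a volatile GEM object backed by the zero-cost fake pages above,
 * suitable for exercising GTT insertion without consuming real memory.
 */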
115 static struct drm_i915_gem_object *
116 fake_dma_object(struct drm_i915_private *i915, u64 size)
117 {
118 	static struct lock_class_key lock_class;
119 	struct drm_i915_gem_object *obj;
120 
121 	GEM_BUG_ON(!size);
122 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
123 
124 	if (overflows_type(size, obj->base.size))
125 		return ERR_PTR(-E2BIG);
126 
127 	obj = i915_gem_object_alloc();
128 	if (!obj)
129 		goto err;
130 
131 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
132 	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
133 
134 	i915_gem_object_set_volatile(obj);
135 
136 	obj->write_domain = I915_GEM_DOMAIN_CPU;
137 	obj->read_domains = I915_GEM_DOMAIN_CPU;
138 	obj->cache_level = I915_CACHE_NONE;
139 
140 	/* Preallocate the "backing storage" */
141 	if (i915_gem_object_pin_pages_unlocked(obj))
142 		goto err_obj;
143 
144 	i915_gem_object_unpin_pages(obj);
145 	return obj;
146 
147 err_obj:
148 	i915_gem_object_put(obj);
149 err:
150 	return ERR_PTR(-ENOMEM);
151 }
152 
153 static int igt_ppgtt_alloc(void *arg)
154 {
155 	struct drm_i915_private *dev_priv = arg;
156 	struct i915_ppgtt *ppgtt;
157 	struct i915_gem_ww_ctx ww;
158 	u64 size, last, limit;
159 	int err = 0;
160 
161 	/* Allocate a ppgtt and try to fill the entire range */
162 
163 	if (!HAS_PPGTT(dev_priv))
164 		return 0;
165 
166 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
167 	if (IS_ERR(ppgtt))
168 		return PTR_ERR(ppgtt);
169 
170 	if (!ppgtt->vm.allocate_va_range)
171 		goto err_ppgtt_cleanup;
172 
173 	/*
174 	 * While we only allocate the page tables here and so we could
175 	 * address a much larger GTT than we could actually fit into
176 	 * RAM, a practical limit is the number of physical pages in the
177 	 * system. This should ensure that we do not run into the OOM killer
178 	 * during the test and wilfully take down the machine.
179 	 */
180 	limit = totalram_pages() << PAGE_SHIFT;
181 	limit = min(ppgtt->vm.total, limit);
182 
183 	i915_gem_ww_ctx_init(&ww, false);
184 retry:
185 	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
186 	if (err)
187 		goto err_ppgtt_cleanup;
188 
189 	/* Check we can allocate the entire range */
190 	for (size = 4096; size <= limit; size <<= 2) {
191 		struct i915_vm_pt_stash stash = {};
192 
193 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
194 		if (err)
195 			goto err_ppgtt_cleanup;
196 
197 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
198 		if (err) {
199 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
200 			goto err_ppgtt_cleanup;
201 		}
202 
203 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
204 		cond_resched();
205 
206 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
207 
208 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
209 	}
210 
211 	/* Check we can incrementally allocate the entire range */
212 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
213 		struct i915_vm_pt_stash stash = {};
214 
215 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
216 		if (err)
217 			goto err_ppgtt_cleanup;
218 
219 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
220 		if (err) {
221 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
222 			goto err_ppgtt_cleanup;
223 		}
224 
225 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
226 					    last, size - last);
227 		cond_resched();
228 
229 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
230 	}
231 
232 err_ppgtt_cleanup:
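	/* On ww-mutex contention, back off (dropping held locks) and retry */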
233 	if (err == -EDEADLK) {
234 		err = i915_gem_ww_ctx_backoff(&ww);
235 		if (!err)
236 			goto retry;
237 	}
238 	i915_gem_ww_ctx_fini(&ww);
239 
240 	i915_vm_put(&ppgtt->vm);
241 	return err;
242 }
243 
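/*
 * lowlevel_hole() bypasses the VMA machinery: page tables are allocated
 * by hand where the backend requires it, and PTEs are written and
 * cleared directly via vm->insert_entries()/vm->clear_range() using a
 * mock vma_resource, at randomly ordered offsets within the hole.
 */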
244 static int lowlevel_hole(struct i915_address_space *vm,
245 			 u64 hole_start, u64 hole_end,
246 			 unsigned long end_time)
247 {
248 	const unsigned int min_alignment =
249 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
250 	I915_RND_STATE(seed_prng);
251 	struct i915_vma_resource *mock_vma_res;
252 	unsigned int size;
253 
254 	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
255 	if (!mock_vma_res)
256 		return -ENOMEM;
257 
258 	/* Keep creating larger objects until one cannot fit into the hole */
259 	for (size = 12; (hole_end - hole_start) >> size; size++) {
260 		I915_RND_SUBSTATE(prng, seed_prng);
261 		struct drm_i915_gem_object *obj;
262 		unsigned int *order, count, n;
263 		u64 hole_size, aligned_size;
264 
265 		aligned_size = max_t(u32, ilog2(min_alignment), size);
266 		hole_size = (hole_end - hole_start) >> aligned_size;
267 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
268 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
269 		count = hole_size >> 1;
270 		if (!count) {
271 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
272 				 __func__, hole_start, hole_end, size, hole_size);
273 			break;
274 		}
275 
276 		do {
277 			order = i915_random_order(count, &prng);
278 			if (order)
279 				break;
280 		} while (count >>= 1);
281 		if (!count) {
282 			kfree(mock_vma_res);
283 			return -ENOMEM;
284 		}
285 		GEM_BUG_ON(!order);
286 
287 		GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
288 		GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
289 
290 		/* Ignore allocation failures (i.e. don't report them as
291 		 * a test failure) as we are purposefully allocating very
292 		 * large objects without checking that we have sufficient
293 		 * memory. We expect to hit -ENOMEM.
294 		 */
295 
296 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
297 		if (IS_ERR(obj)) {
298 			kfree(order);
299 			break;
300 		}
301 
302 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
303 
304 		if (i915_gem_object_pin_pages_unlocked(obj)) {
305 			i915_gem_object_put(obj);
306 			kfree(order);
307 			break;
308 		}
309 
310 		for (n = 0; n < count; n++) {
311 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
312 			intel_wakeref_t wakeref;
313 
314 			GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
315 
316 			if (igt_timeout(end_time,
317 					"%s timed out before %d/%d\n",
318 					__func__, n, count)) {
319 				hole_end = hole_start; /* quit */
320 				break;
321 			}
322 
323 			if (vm->allocate_va_range) {
324 				struct i915_vm_pt_stash stash = {};
325 				struct i915_gem_ww_ctx ww;
326 				int err;
327 
328 				i915_gem_ww_ctx_init(&ww, false);
329 retry:
330 				err = i915_vm_lock_objects(vm, &ww);
331 				if (err)
332 					goto alloc_vm_end;
333 
334 				err = -ENOMEM;
335 				if (i915_vm_alloc_pt_stash(vm, &stash,
336 							   BIT_ULL(size)))
337 					goto alloc_vm_end;
338 
339 				err = i915_vm_map_pt_stash(vm, &stash);
340 				if (!err)
341 					vm->allocate_va_range(vm, &stash,
342 							      addr, BIT_ULL(size));
343 				i915_vm_free_pt_stash(vm, &stash);
344 alloc_vm_end:
345 				if (err == -EDEADLK) {
346 					err = i915_gem_ww_ctx_backoff(&ww);
347 					if (!err)
348 						goto retry;
349 				}
350 				i915_gem_ww_ctx_fini(&ww);
351 
352 				if (err)
353 					break;
354 			}
355 
356 			mock_vma_res->bi.pages = obj->mm.pages;
357 			mock_vma_res->node_size = BIT_ULL(aligned_size);
358 			mock_vma_res->start = addr;
359 
360 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
361 				vm->insert_entries(vm, mock_vma_res,
362 						   I915_CACHE_NONE, 0);
363 		}
364 		count = n;
365 
366 		i915_random_reorder(order, count, &prng);
367 		for (n = 0; n < count; n++) {
368 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
369 			intel_wakeref_t wakeref;
370 
371 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
372 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
373 				vm->clear_range(vm, addr, BIT_ULL(size));
374 		}
375 
376 		i915_gem_object_unpin_pages(obj);
377 		i915_gem_object_put(obj);
378 
379 		kfree(order);
380 
381 		cleanup_freed_objects(vm->i915);
382 	}
383 
384 	kfree(mock_vma_res);
385 	return 0;
386 }
387 
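/* Unbind (ignoring errors) and release every object left on the list */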
388 static void close_object_list(struct list_head *objects,
389 			      struct i915_address_space *vm)
390 {
391 	struct drm_i915_gem_object *obj, *on;
392 	int ignored;
393 
394 	list_for_each_entry_safe(obj, on, objects, st_link) {
395 		struct i915_vma *vma;
396 
397 		vma = i915_vma_instance(obj, vm, NULL);
398 		if (!IS_ERR(vma))
399 			ignored = i915_vma_unbind_unlocked(vma);
400 
401 		list_del(&obj->st_link);
402 		i915_gem_object_put(obj);
403 	}
404 }
405 
406 static int fill_hole(struct i915_address_space *vm,
407 		     u64 hole_start, u64 hole_end,
408 		     unsigned long end_time)
409 {
410 	const u64 hole_size = hole_end - hole_start;
411 	struct drm_i915_gem_object *obj;
412 	const unsigned int min_alignment =
413 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
414 	const unsigned long max_pages =
415 		min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
416 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
417 	unsigned long npages, prime, flags;
418 	struct i915_vma *vma;
419 	LIST_HEAD(objects);
420 	int err;
421 
422 	/* Try binding many VMAs working inwards from either edge */
423 
424 	flags = PIN_OFFSET_FIXED | PIN_USER;
425 	if (i915_is_ggtt(vm))
426 		flags |= PIN_GLOBAL;
427 
428 	for_each_prime_number_from(prime, 2, max_step) {
429 		for (npages = 1; npages <= max_pages; npages *= prime) {
430 			const u64 full_size = npages << PAGE_SHIFT;
431 			const struct {
432 				const char *name;
433 				u64 offset;
434 				int step;
435 			} phases[] = {
436 				{ "top-down", hole_end, -1, },
437 				{ "bottom-up", hole_start, 1, },
438 				{ }
439 			}, *p;
440 
441 			obj = fake_dma_object(vm->i915, full_size);
442 			if (IS_ERR(obj))
443 				break;
444 
445 			list_add(&obj->st_link, &objects);
446 
447 			/* Align differently sized objects against the edges, and
448 			 * check we don't walk off into the void when binding
449 			 * them into the GTT.
450 			 */
451 			for (p = phases; p->name; p++) {
452 				u64 offset;
453 
454 				offset = p->offset;
455 				list_for_each_entry(obj, &objects, st_link) {
456 					u64 aligned_size = round_up(obj->base.size,
457 								    min_alignment);
458 
459 					vma = i915_vma_instance(obj, vm, NULL);
460 					if (IS_ERR(vma))
461 						continue;
462 
463 					if (p->step < 0) {
464 						if (offset < hole_start + aligned_size)
465 							break;
466 						offset -= aligned_size;
467 					}
468 
469 					err = i915_vma_pin(vma, 0, 0, offset | flags);
470 					if (err) {
471 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
472 						       __func__, p->name, err, npages, prime, offset);
473 						goto err;
474 					}
475 
476 					if (!drm_mm_node_allocated(&vma->node) ||
477 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
478 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
479 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
480 						       offset);
481 						err = -EINVAL;
482 						goto err;
483 					}
484 
485 					i915_vma_unpin(vma);
486 
487 					if (p->step > 0) {
488 						if (offset + aligned_size > hole_end)
489 							break;
490 						offset += aligned_size;
491 					}
492 				}
493 
494 				offset = p->offset;
495 				list_for_each_entry(obj, &objects, st_link) {
496 					u64 aligned_size = round_up(obj->base.size,
497 								    min_alignment);
498 
499 					vma = i915_vma_instance(obj, vm, NULL);
500 					if (IS_ERR(vma))
501 						continue;
502 
503 					if (p->step < 0) {
504 						if (offset < hole_start + aligned_size)
505 							break;
506 						offset -= aligned_size;
507 					}
508 
509 					if (!drm_mm_node_allocated(&vma->node) ||
510 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
511 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
512 						       __func__, p->name, vma->node.start, vma->node.size,
513 						       offset);
514 						err = -EINVAL;
515 						goto err;
516 					}
517 
518 					err = i915_vma_unbind_unlocked(vma);
519 					if (err) {
520 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
521 						       __func__, p->name, vma->node.start, vma->node.size,
522 						       err);
523 						goto err;
524 					}
525 
526 					if (p->step > 0) {
527 						if (offset + aligned_size > hole_end)
528 							break;
529 						offset += aligned_size;
530 					}
531 				}
532 
533 				offset = p->offset;
534 				list_for_each_entry_reverse(obj, &objects, st_link) {
535 					u64 aligned_size = round_up(obj->base.size,
536 								    min_alignment);
537 
538 					vma = i915_vma_instance(obj, vm, NULL);
539 					if (IS_ERR(vma))
540 						continue;
541 
542 					if (p->step < 0) {
543 						if (offset < hole_start + aligned_size)
544 							break;
545 						offset -= aligned_size;
546 					}
547 
548 					err = i915_vma_pin(vma, 0, 0, offset | flags);
549 					if (err) {
550 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
551 						       __func__, p->name, err, npages, prime, offset);
552 						goto err;
553 					}
554 
555 					if (!drm_mm_node_allocated(&vma->node) ||
556 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
557 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
558 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
559 						       offset);
560 						err = -EINVAL;
561 						goto err;
562 					}
563 
564 					i915_vma_unpin(vma);
565 
566 					if (p->step > 0) {
567 						if (offset + aligned_size > hole_end)
568 							break;
569 						offset += aligned_size;
570 					}
571 				}
572 
573 				offset = p->offset;
574 				list_for_each_entry_reverse(obj, &objects, st_link) {
575 					u64 aligned_size = round_up(obj->base.size,
576 								    min_alignment);
577 
578 					vma = i915_vma_instance(obj, vm, NULL);
579 					if (IS_ERR(vma))
580 						continue;
581 
582 					if (p->step < 0) {
583 						if (offset < hole_start + aligned_size)
584 							break;
585 						offset -= aligned_size;
586 					}
587 
588 					if (!drm_mm_node_allocated(&vma->node) ||
589 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
590 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
591 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
592 						       offset);
593 						err = -EINVAL;
594 						goto err;
595 					}
596 
597 					err = i915_vma_unbind_unlocked(vma);
598 					if (err) {
599 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
600 						       __func__, p->name, vma->node.start, vma->node.size,
601 						       err);
602 						goto err;
603 					}
604 
605 					if (p->step > 0) {
606 						if (offset + aligned_size > hole_end)
607 							break;
608 						offset += aligned_size;
609 					}
610 				}
611 			}
612 
613 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
614 					__func__, npages, prime)) {
615 				err = -EINTR;
616 				goto err;
617 			}
618 		}
619 
620 		close_object_list(&objects, vm);
621 		cleanup_freed_objects(vm->i915);
622 	}
623 
624 	return 0;
625 
626 err:
627 	close_object_list(&objects, vm);
628 	return err;
629 }
630 
631 static int walk_hole(struct i915_address_space *vm,
632 		     u64 hole_start, u64 hole_end,
633 		     unsigned long end_time)
634 {
635 	const u64 hole_size = hole_end - hole_start;
636 	const unsigned long max_pages =
637 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
638 	unsigned long min_alignment;
639 	unsigned long flags;
640 	u64 size;
641 
642 	/* Try binding a single VMA in different positions within the hole */
643 
644 	flags = PIN_OFFSET_FIXED | PIN_USER;
645 	if (i915_is_ggtt(vm))
646 		flags |= PIN_GLOBAL;
647 
648 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
649 
650 	for_each_prime_number_from(size, 1, max_pages) {
651 		struct drm_i915_gem_object *obj;
652 		struct i915_vma *vma;
653 		u64 addr;
654 		int err = 0;
655 
656 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
657 		if (IS_ERR(obj))
658 			break;
659 
660 		vma = i915_vma_instance(obj, vm, NULL);
661 		if (IS_ERR(vma)) {
662 			err = PTR_ERR(vma);
663 			goto err_put;
664 		}
665 
666 		for (addr = hole_start;
667 		     addr + obj->base.size < hole_end;
668 		     addr += round_up(obj->base.size, min_alignment)) {
669 			err = i915_vma_pin(vma, 0, 0, addr | flags);
670 			if (err) {
671 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
672 				       __func__, addr, vma->size,
673 				       hole_start, hole_end, err);
674 				goto err_put;
675 			}
676 			i915_vma_unpin(vma);
677 
678 			if (!drm_mm_node_allocated(&vma->node) ||
679 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
680 				pr_err("%s incorrect at %llx + %llx\n",
681 				       __func__, addr, vma->size);
682 				err = -EINVAL;
683 				goto err_put;
684 			}
685 
686 			err = i915_vma_unbind_unlocked(vma);
687 			if (err) {
688 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
689 				       __func__, addr, vma->size, err);
690 				goto err_put;
691 			}
692 
693 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
694 
695 			if (igt_timeout(end_time,
696 					"%s timed out at %llx\n",
697 					__func__, addr)) {
698 				err = -EINTR;
699 				goto err_put;
700 			}
701 		}
702 
703 err_put:
704 		i915_gem_object_put(obj);
705 		if (err)
706 			return err;
707 
708 		cleanup_freed_objects(vm->i915);
709 	}
710 
711 	return 0;
712 }
713 
714 static int pot_hole(struct i915_address_space *vm,
715 		    u64 hole_start, u64 hole_end,
716 		    unsigned long end_time)
717 {
718 	struct drm_i915_gem_object *obj;
719 	struct i915_vma *vma;
720 	unsigned int min_alignment;
721 	unsigned long flags;
722 	unsigned int pot;
723 	int err = 0;
724 
725 	flags = PIN_OFFSET_FIXED | PIN_USER;
726 	if (i915_is_ggtt(vm))
727 		flags |= PIN_GLOBAL;
728 
729 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
730 
731 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
732 	if (IS_ERR(obj))
733 		return PTR_ERR(obj);
734 
735 	vma = i915_vma_instance(obj, vm, NULL);
736 	if (IS_ERR(vma)) {
737 		err = PTR_ERR(vma);
738 		goto err_obj;
739 	}
740 
741 	/* Insert a pair of pages across every pot boundary within the hole */
742 	for (pot = fls64(hole_end - 1) - 1;
743 	     pot > ilog2(2 * min_alignment);
744 	     pot--) {
745 		u64 step = BIT_ULL(pot);
746 		u64 addr;
747 
748 		for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
749 		     hole_end > addr && hole_end - addr >= 2 * min_alignment;
750 		     addr += step) {
751 			err = i915_vma_pin(vma, 0, 0, addr | flags);
752 			if (err) {
753 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
754 				       __func__,
755 				       addr,
756 				       hole_start, hole_end,
757 				       err);
758 				goto err_obj;
759 			}
760 
761 			if (!drm_mm_node_allocated(&vma->node) ||
762 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
763 				pr_err("%s incorrect at %llx + %llx\n",
764 				       __func__, addr, vma->size);
765 				i915_vma_unpin(vma);
766 				err = i915_vma_unbind_unlocked(vma);
767 				err = -EINVAL;
768 				goto err_obj;
769 			}
770 
771 			i915_vma_unpin(vma);
772 			err = i915_vma_unbind_unlocked(vma);
773 			GEM_BUG_ON(err);
774 		}
775 
776 		if (igt_timeout(end_time,
777 				"%s timed out after %d/%d\n",
778 				__func__, pot, fls64(hole_end - 1) - 1)) {
779 			err = -EINTR;
780 			goto err_obj;
781 		}
782 	}
783 
784 err_obj:
785 	i915_gem_object_put(obj);
786 	return err;
787 }
788 
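/*
 * drunk_hole() pins and unbinds a single VMA at randomly ordered,
 * non-overlapping offsets throughout the hole, doubling the object size
 * on each pass until it no longer fits.
 */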
789 static int drunk_hole(struct i915_address_space *vm,
790 		      u64 hole_start, u64 hole_end,
791 		      unsigned long end_time)
792 {
793 	I915_RND_STATE(prng);
794 	unsigned int min_alignment;
795 	unsigned int size;
796 	unsigned long flags;
797 
798 	flags = PIN_OFFSET_FIXED | PIN_USER;
799 	if (i915_is_ggtt(vm))
800 		flags |= PIN_GLOBAL;
801 
802 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
803 
804 	/* Keep creating larger objects until one cannot fit into the hole */
805 	for (size = 12; (hole_end - hole_start) >> size; size++) {
806 		struct drm_i915_gem_object *obj;
807 		unsigned int *order, count, n;
808 		struct i915_vma *vma;
809 		u64 hole_size, aligned_size;
810 		int err = -ENODEV;
811 
812 		aligned_size = max_t(u32, ilog2(min_alignment), size);
813 		hole_size = (hole_end - hole_start) >> aligned_size;
814 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
815 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
816 		count = hole_size >> 1;
817 		if (!count) {
818 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
819 				 __func__, hole_start, hole_end, size, hole_size);
820 			break;
821 		}
822 
823 		do {
824 			order = i915_random_order(count, &prng);
825 			if (order)
826 				break;
827 		} while (count >>= 1);
828 		if (!count)
829 			return -ENOMEM;
830 		GEM_BUG_ON(!order);
831 
832 		/* Ignore allocation failures (i.e. don't report them as
833 		 * a test failure) as we are purposefully allocating very
834 		 * large objects without checking that we have sufficient
835 		 * memory. We expect to hit -ENOMEM.
836 		 */
837 
838 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
839 		if (IS_ERR(obj)) {
840 			kfree(order);
841 			break;
842 		}
843 
844 		vma = i915_vma_instance(obj, vm, NULL);
845 		if (IS_ERR(vma)) {
846 			err = PTR_ERR(vma);
847 			goto err_obj;
848 		}
849 
850 		GEM_BUG_ON(vma->size != BIT_ULL(size));
851 
852 		for (n = 0; n < count; n++) {
853 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
854 
855 			err = i915_vma_pin(vma, 0, 0, addr | flags);
856 			if (err) {
857 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
858 				       __func__,
859 				       addr, BIT_ULL(size),
860 				       hole_start, hole_end,
861 				       err);
862 				goto err_obj;
863 			}
864 
865 			if (!drm_mm_node_allocated(&vma->node) ||
866 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
867 				pr_err("%s incorrect at %llx + %llx\n",
868 				       __func__, addr, BIT_ULL(size));
869 				i915_vma_unpin(vma);
870 				err = i915_vma_unbind_unlocked(vma);
871 				err = -EINVAL;
872 				goto err_obj;
873 			}
874 
875 			i915_vma_unpin(vma);
876 			err = i915_vma_unbind_unlocked(vma);
877 			GEM_BUG_ON(err);
878 
879 			if (igt_timeout(end_time,
880 					"%s timed out after %d/%d\n",
881 					__func__, n, count)) {
882 				err = -EINTR;
883 				goto err_obj;
884 			}
885 		}
886 
887 err_obj:
888 		i915_gem_object_put(obj);
889 		kfree(order);
890 		if (err)
891 			return err;
892 
893 		cleanup_freed_objects(vm->i915);
894 	}
895 
896 	return 0;
897 }
898 
899 static int __shrink_hole(struct i915_address_space *vm,
900 			 u64 hole_start, u64 hole_end,
901 			 unsigned long end_time)
902 {
903 	struct drm_i915_gem_object *obj;
904 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
905 	unsigned int min_alignment;
906 	unsigned int order = 12;
907 	LIST_HEAD(objects);
908 	int err = 0;
909 	u64 addr;
910 
911 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
912 
913 	/* Fill the hole with progressively larger objects until it is full */
914 	for (addr = hole_start; addr < hole_end; ) {
915 		struct i915_vma *vma;
916 		u64 size = BIT_ULL(order++);
917 
918 		size = min(size, hole_end - addr);
919 		obj = fake_dma_object(vm->i915, size);
920 		if (IS_ERR(obj)) {
921 			err = PTR_ERR(obj);
922 			break;
923 		}
924 
925 		list_add(&obj->st_link, &objects);
926 
927 		vma = i915_vma_instance(obj, vm, NULL);
928 		if (IS_ERR(vma)) {
929 			err = PTR_ERR(vma);
930 			break;
931 		}
932 
933 		GEM_BUG_ON(vma->size != size);
934 
935 		err = i915_vma_pin(vma, 0, 0, addr | flags);
936 		if (err) {
937 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
938 			       __func__, addr, size, hole_start, hole_end, err);
939 			break;
940 		}
941 
942 		if (!drm_mm_node_allocated(&vma->node) ||
943 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
944 			pr_err("%s incorrect at %llx + %llx\n",
945 			       __func__, addr, size);
946 			i915_vma_unpin(vma);
947 			err = i915_vma_unbind_unlocked(vma);
948 			err = -EINVAL;
949 			break;
950 		}
951 
952 		i915_vma_unpin(vma);
953 		addr += round_up(size, min_alignment);
954 
955 		/*
956 		 * Since we are injecting allocation faults at random intervals,
957 		 * wait for this allocation to complete before we change the
958 		 * fault-injection interval.
959 		 */
960 		err = i915_vma_sync(vma);
961 		if (err)
962 			break;
963 
964 		if (igt_timeout(end_time,
965 				"%s timed out at offset %llx [%llx - %llx]\n",
966 				__func__, addr, hole_start, hole_end)) {
967 			err = -EINTR;
968 			break;
969 		}
970 	}
971 
972 	close_object_list(&objects, vm);
973 	cleanup_freed_objects(vm->i915);
974 	return err;
975 }
976 
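/*
 * shrink_hole() repeats __shrink_hole() with fault injection enabled on
 * the address space, stepping the injection interval through a series of
 * primes so that allocation failures land at different points each pass.
 */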
977 static int shrink_hole(struct i915_address_space *vm,
978 		       u64 hole_start, u64 hole_end,
979 		       unsigned long end_time)
980 {
981 	unsigned long prime;
982 	int err;
983 
984 	vm->fault_attr.probability = 999;
985 	atomic_set(&vm->fault_attr.times, -1);
986 
987 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
988 		vm->fault_attr.interval = prime;
989 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
990 		if (err)
991 			break;
992 	}
993 
994 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
995 
996 	return err;
997 }
998 
999 static int shrink_boom(struct i915_address_space *vm,
1000 		       u64 hole_start, u64 hole_end,
1001 		       unsigned long end_time)
1002 {
1003 	unsigned int sizes[] = { SZ_2M, SZ_1G };
1004 	struct drm_i915_gem_object *purge;
1005 	struct drm_i915_gem_object *explode;
1006 	int err;
1007 	int i;
1008 
1009 	/*
1010 	 * Catch the case which shrink_hole seems to miss. The setup here
1011 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
1012 	 * ensuring that all vmas associated with the respective pd/pdp are
1013 	 * unpinned at the time.
1014 	 */
1015 
1016 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
1017 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1018 		unsigned int size = sizes[i];
1019 		struct i915_vma *vma;
1020 
1021 		purge = fake_dma_object(vm->i915, size);
1022 		if (IS_ERR(purge))
1023 			return PTR_ERR(purge);
1024 
1025 		vma = i915_vma_instance(purge, vm, NULL);
1026 		if (IS_ERR(vma)) {
1027 			err = PTR_ERR(vma);
1028 			goto err_purge;
1029 		}
1030 
1031 		err = i915_vma_pin(vma, 0, 0, flags);
1032 		if (err)
1033 			goto err_purge;
1034 
1035 		/* Should now be ripe for purging */
1036 		i915_vma_unpin(vma);
1037 
1038 		explode = fake_dma_object(vm->i915, size);
1039 		if (IS_ERR(explode)) {
1040 			err = PTR_ERR(explode);
1041 			goto err_purge;
1042 		}
1043 
1044 		vm->fault_attr.probability = 100;
1045 		vm->fault_attr.interval = 1;
1046 		atomic_set(&vm->fault_attr.times, -1);
1047 
1048 		vma = i915_vma_instance(explode, vm, NULL);
1049 		if (IS_ERR(vma)) {
1050 			err = PTR_ERR(vma);
1051 			goto err_explode;
1052 		}
1053 
1054 		err = i915_vma_pin(vma, 0, 0, flags | size);
1055 		if (err)
1056 			goto err_explode;
1057 
1058 		i915_vma_unpin(vma);
1059 
1060 		i915_gem_object_put(purge);
1061 		i915_gem_object_put(explode);
1062 
1063 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1064 		cleanup_freed_objects(vm->i915);
1065 	}
1066 
1067 	return 0;
1068 
1069 err_explode:
1070 	i915_gem_object_put(explode);
1071 err_purge:
1072 	i915_gem_object_put(purge);
1073 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1074 	return err;
1075 }
1076 
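/*
 * Pin an object from the given memory region at a fixed (possibly
 * misaligned) GTT offset and check that vma->size and the node size are
 * expanded to the GTT page size expected for that region.
 */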
1077 static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
1078 			   u64 addr, u64 size, unsigned long flags)
1079 {
1080 	struct drm_i915_gem_object *obj;
1081 	struct i915_vma *vma;
1082 	int err = 0;
1083 	u64 expected_vma_size, expected_node_size;
1084 	bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
1085 			 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
1086 
1087 	obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
1088 	if (IS_ERR(obj)) {
1089 		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
1090 		if (PTR_ERR(obj) == -ENODEV && is_stolen)
1091 			return 0;
1092 		return PTR_ERR(obj);
1093 	}
1094 
1095 	vma = i915_vma_instance(obj, vm, NULL);
1096 	if (IS_ERR(vma)) {
1097 		err = PTR_ERR(vma);
1098 		goto err_put;
1099 	}
1100 
1101 	err = i915_vma_pin(vma, 0, 0, addr | flags);
1102 	if (err)
1103 		goto err_put;
1104 	i915_vma_unpin(vma);
1105 
1106 	if (!drm_mm_node_allocated(&vma->node)) {
1107 		err = -EINVAL;
1108 		goto err_put;
1109 	}
1110 
1111 	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1112 		err = -EINVAL;
1113 		goto err_put;
1114 	}
1115 
1116 	expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
1117 	expected_node_size = expected_vma_size;
1118 
1119 	if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
1120 		expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1121 		expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1122 	}
1123 
1124 	if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
1125 		err = i915_vma_unbind_unlocked(vma);
1126 		err = -EBADSLT;
1127 		goto err_put;
1128 	}
1129 
1130 	err = i915_vma_unbind_unlocked(vma);
1131 	if (err)
1132 		goto err_put;
1133 
1134 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1135 
1136 err_put:
1137 	i915_gem_object_put(obj);
1138 	cleanup_freed_objects(vm->i915);
1139 	return err;
1140 }
1141 
1142 static int misaligned_pin(struct i915_address_space *vm,
1143 			  u64 hole_start, u64 hole_end,
1144 			  unsigned long end_time)
1145 {
1146 	struct intel_memory_region *mr;
1147 	enum intel_region_id id;
1148 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1149 	int err = 0;
1150 	u64 hole_size = hole_end - hole_start;
1151 
1152 	if (i915_is_ggtt(vm))
1153 		flags |= PIN_GLOBAL;
1154 
1155 	for_each_memory_region(mr, vm->i915, id) {
1156 		u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
1157 		u64 size = min_alignment;
1158 		u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
1159 
1160 		/* avoid -ENOSPC on very small hole setups */
1161 		if (hole_size < 3 * min_alignment)
1162 			continue;
1163 
1164 		/* we can't test < 4k alignment due to flags being encoded in lower bits */
1165 		if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
1166 			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
1167 			/* misaligned should error with -EINVAL */
1168 			if (!err)
1169 				err = -EBADSLT;
1170 			if (err != -EINVAL)
1171 				return err;
1172 		}
1173 
1174 		/* test for vma->size expansion to min page size */
1175 		err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
1176 		if (err)
1177 			return err;
1178 
1179 		/* test for intermediate size not expanding vma->size for large alignments */
1180 		err = misaligned_case(vm, mr, addr, size / 2, flags);
1181 		if (err)
1182 			return err;
1183 	}
1184 
1185 	return 0;
1186 }
1187 
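/* Run @func over the entire range of a freshly created full ppGTT */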
1188 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1189 			  int (*func)(struct i915_address_space *vm,
1190 				      u64 hole_start, u64 hole_end,
1191 				      unsigned long end_time))
1192 {
1193 	struct i915_ppgtt *ppgtt;
1194 	IGT_TIMEOUT(end_time);
1195 	struct file *file;
1196 	int err;
1197 
1198 	if (!HAS_FULL_PPGTT(dev_priv))
1199 		return 0;
1200 
1201 	file = mock_file(dev_priv);
1202 	if (IS_ERR(file))
1203 		return PTR_ERR(file);
1204 
1205 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1206 	if (IS_ERR(ppgtt)) {
1207 		err = PTR_ERR(ppgtt);
1208 		goto out_free;
1209 	}
1210 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1211 	assert_vm_alive(&ppgtt->vm);
1212 
1213 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1214 
1215 	i915_vm_put(&ppgtt->vm);
1216 
1217 out_free:
1218 	fput(file);
1219 	return err;
1220 }
1221 
1222 static int igt_ppgtt_fill(void *arg)
1223 {
1224 	return exercise_ppgtt(arg, fill_hole);
1225 }
1226 
1227 static int igt_ppgtt_walk(void *arg)
1228 {
1229 	return exercise_ppgtt(arg, walk_hole);
1230 }
1231 
1232 static int igt_ppgtt_pot(void *arg)
1233 {
1234 	return exercise_ppgtt(arg, pot_hole);
1235 }
1236 
1237 static int igt_ppgtt_drunk(void *arg)
1238 {
1239 	return exercise_ppgtt(arg, drunk_hole);
1240 }
1241 
1242 static int igt_ppgtt_lowlevel(void *arg)
1243 {
1244 	return exercise_ppgtt(arg, lowlevel_hole);
1245 }
1246 
1247 static int igt_ppgtt_shrink(void *arg)
1248 {
1249 	return exercise_ppgtt(arg, shrink_hole);
1250 }
1251 
1252 static int igt_ppgtt_shrink_boom(void *arg)
1253 {
1254 	return exercise_ppgtt(arg, shrink_boom);
1255 }
1256 
1257 static int igt_ppgtt_misaligned_pin(void *arg)
1258 {
1259 	return exercise_ppgtt(arg, misaligned_pin);
1260 }
1261 
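/* list_sort() comparator: order drm_mm holes by ascending start address */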
1262 static int sort_holes(void *priv, const struct list_head *A,
1263 		      const struct list_head *B)
1264 {
1265 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1266 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1267 
1268 	if (a->start < b->start)
1269 		return -1;
1270 	else
1271 		return 1;
1272 }
1273 
1274 static int exercise_ggtt(struct drm_i915_private *i915,
1275 			 int (*func)(struct i915_address_space *vm,
1276 				     u64 hole_start, u64 hole_end,
1277 				     unsigned long end_time))
1278 {
1279 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1280 	u64 hole_start, hole_end, last = 0;
1281 	struct drm_mm_node *node;
1282 	IGT_TIMEOUT(end_time);
1283 	int err = 0;
1284 
1285 restart:
1286 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1287 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1288 		if (hole_start < last)
1289 			continue;
1290 
1291 		if (ggtt->vm.mm.color_adjust)
1292 			ggtt->vm.mm.color_adjust(node, 0,
1293 						 &hole_start, &hole_end);
1294 		if (hole_start >= hole_end)
1295 			continue;
1296 
1297 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1298 		if (err)
1299 			break;
1300 
1301 		/* As we have manipulated the drm_mm, the list may be corrupt */
1302 		last = hole_end;
1303 		goto restart;
1304 	}
1305 
1306 	return err;
1307 }
1308 
1309 static int igt_ggtt_fill(void *arg)
1310 {
1311 	return exercise_ggtt(arg, fill_hole);
1312 }
1313 
1314 static int igt_ggtt_walk(void *arg)
1315 {
1316 	return exercise_ggtt(arg, walk_hole);
1317 }
1318 
1319 static int igt_ggtt_pot(void *arg)
1320 {
1321 	return exercise_ggtt(arg, pot_hole);
1322 }
1323 
1324 static int igt_ggtt_drunk(void *arg)
1325 {
1326 	return exercise_ggtt(arg, drunk_hole);
1327 }
1328 
1329 static int igt_ggtt_lowlevel(void *arg)
1330 {
1331 	return exercise_ggtt(arg, lowlevel_hole);
1332 }
1333 
1334 static int igt_ggtt_misaligned_pin(void *arg)
1335 {
1336 	return exercise_ggtt(arg, misaligned_pin);
1337 }
1338 
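/*
 * igt_ggtt_page() inserts the same backing page at a series of GGTT
 * offsets, writes a distinct value through each aperture mapping and
 * then reads them back in random order to verify vm->insert_page().
 */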
1339 static int igt_ggtt_page(void *arg)
1340 {
1341 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1342 	I915_RND_STATE(prng);
1343 	struct drm_i915_private *i915 = arg;
1344 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1345 	struct drm_i915_gem_object *obj;
1346 	intel_wakeref_t wakeref;
1347 	struct drm_mm_node tmp;
1348 	unsigned int *order, n;
1349 	int err;
1350 
1351 	if (!i915_ggtt_has_aperture(ggtt))
1352 		return 0;
1353 
1354 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1355 	if (IS_ERR(obj))
1356 		return PTR_ERR(obj);
1357 
1358 	err = i915_gem_object_pin_pages_unlocked(obj);
1359 	if (err)
1360 		goto out_free;
1361 
1362 	memset(&tmp, 0, sizeof(tmp));
1363 	mutex_lock(&ggtt->vm.mutex);
1364 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1365 					  count * PAGE_SIZE, 0,
1366 					  I915_COLOR_UNEVICTABLE,
1367 					  0, ggtt->mappable_end,
1368 					  DRM_MM_INSERT_LOW);
1369 	mutex_unlock(&ggtt->vm.mutex);
1370 	if (err)
1371 		goto out_unpin;
1372 
1373 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1374 
1375 	for (n = 0; n < count; n++) {
1376 		u64 offset = tmp.start + n * PAGE_SIZE;
1377 
1378 		ggtt->vm.insert_page(&ggtt->vm,
1379 				     i915_gem_object_get_dma_address(obj, 0),
1380 				     offset, I915_CACHE_NONE, 0);
1381 	}
1382 
1383 	order = i915_random_order(count, &prng);
1384 	if (!order) {
1385 		err = -ENOMEM;
1386 		goto out_remove;
1387 	}
1388 
1389 	for (n = 0; n < count; n++) {
1390 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1391 		u32 __iomem *vaddr;
1392 
1393 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1394 		iowrite32(n, vaddr + n);
1395 		io_mapping_unmap_atomic(vaddr);
1396 	}
1397 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1398 
1399 	i915_random_reorder(order, count, &prng);
1400 	for (n = 0; n < count; n++) {
1401 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1402 		u32 __iomem *vaddr;
1403 		u32 val;
1404 
1405 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1406 		val = ioread32(vaddr + n);
1407 		io_mapping_unmap_atomic(vaddr);
1408 
1409 		if (val != n) {
1410 			pr_err("insert page failed: found %d, expected %d\n",
1411 			       val, n);
1412 			err = -EINVAL;
1413 			break;
1414 		}
1415 	}
1416 
1417 	kfree(order);
1418 out_remove:
1419 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1420 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1421 	mutex_lock(&ggtt->vm.mutex);
1422 	drm_mm_remove_node(&tmp);
1423 	mutex_unlock(&ggtt->vm.mutex);
1424 out_unpin:
1425 	i915_gem_object_unpin_pages(obj);
1426 out_free:
1427 	i915_gem_object_put(obj);
1428 	return err;
1429 }
1430 
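/*
 * Fake the bookkeeping side effects of binding: pin the backing store,
 * mark the vma's pages as active and move it onto the VM's bound list.
 */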
1431 static void track_vma_bind(struct i915_vma *vma)
1432 {
1433 	struct drm_i915_gem_object *obj = vma->obj;
1434 
1435 	__i915_gem_object_pin_pages(obj);
1436 
1437 	GEM_BUG_ON(atomic_read(&vma->pages_count));
1438 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1439 	__i915_gem_object_pin_pages(obj);
1440 	vma->pages = obj->mm.pages;
1441 	vma->resource->bi.pages = vma->pages;
1442 
1443 	mutex_lock(&vma->vm->mutex);
1444 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1445 	mutex_unlock(&vma->vm->mutex);
1446 }
1447 
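/* Run @func over a mock context's VM, capped to the amount of RAM */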
1448 static int exercise_mock(struct drm_i915_private *i915,
1449 			 int (*func)(struct i915_address_space *vm,
1450 				     u64 hole_start, u64 hole_end,
1451 				     unsigned long end_time))
1452 {
1453 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1454 	struct i915_address_space *vm;
1455 	struct i915_gem_context *ctx;
1456 	IGT_TIMEOUT(end_time);
1457 	int err;
1458 
1459 	ctx = mock_context(i915, "mock");
1460 	if (!ctx)
1461 		return -ENOMEM;
1462 
1463 	vm = i915_gem_context_get_eb_vm(ctx);
1464 	err = func(vm, 0, min(vm->total, limit), end_time);
1465 	i915_vm_put(vm);
1466 
1467 	mock_context_close(ctx);
1468 	return err;
1469 }
1470 
1471 static int igt_mock_fill(void *arg)
1472 {
1473 	struct i915_ggtt *ggtt = arg;
1474 
1475 	return exercise_mock(ggtt->vm.i915, fill_hole);
1476 }
1477 
1478 static int igt_mock_walk(void *arg)
1479 {
1480 	struct i915_ggtt *ggtt = arg;
1481 
1482 	return exercise_mock(ggtt->vm.i915, walk_hole);
1483 }
1484 
1485 static int igt_mock_pot(void *arg)
1486 {
1487 	struct i915_ggtt *ggtt = arg;
1488 
1489 	return exercise_mock(ggtt->vm.i915, pot_hole);
1490 }
1491 
1492 static int igt_mock_drunk(void *arg)
1493 {
1494 	struct i915_ggtt *ggtt = arg;
1495 
1496 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1497 }
1498 
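/*
 * Reserve the exact GTT range at @offset for @vma under the vm mutex,
 * attaching a freshly allocated vma_resource on success.
 */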
1499 static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1500 {
1501 	struct i915_address_space *vm = vma->vm;
1502 	struct i915_vma_resource *vma_res;
1503 	struct drm_i915_gem_object *obj = vma->obj;
1504 	int err;
1505 
1506 	vma_res = i915_vma_resource_alloc();
1507 	if (IS_ERR(vma_res))
1508 		return PTR_ERR(vma_res);
1509 
1510 	mutex_lock(&vm->mutex);
1511 	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
1512 				   offset,
1513 				   obj->cache_level,
1514 				   0);
1515 	if (!err) {
1516 		i915_vma_resource_init_from_vma(vma_res, vma);
1517 		vma->resource = vma_res;
1518 	} else {
1519 		kfree(vma_res);
1520 	}
1521 	mutex_unlock(&vm->mutex);
1522 
1523 	return err;
1524 }
1525 
1526 static int igt_gtt_reserve(void *arg)
1527 {
1528 	struct i915_ggtt *ggtt = arg;
1529 	struct drm_i915_gem_object *obj, *on;
1530 	I915_RND_STATE(prng);
1531 	LIST_HEAD(objects);
1532 	u64 total;
1533 	int err = -ENODEV;
1534 
1535 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1536 	 * for the node, and evicts if it has to. So our test checks that
1537 	 * it can give us the requested space and prevent overlaps.
1538 	 */
1539 
1540 	/* Start by filling the GGTT */
1541 	for (total = 0;
1542 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1543 	     total += 2 * I915_GTT_PAGE_SIZE) {
1544 		struct i915_vma *vma;
1545 
1546 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1547 						      2 * PAGE_SIZE);
1548 		if (IS_ERR(obj)) {
1549 			err = PTR_ERR(obj);
1550 			goto out;
1551 		}
1552 
1553 		err = i915_gem_object_pin_pages_unlocked(obj);
1554 		if (err) {
1555 			i915_gem_object_put(obj);
1556 			goto out;
1557 		}
1558 
1559 		list_add(&obj->st_link, &objects);
1560 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1561 		if (IS_ERR(vma)) {
1562 			err = PTR_ERR(vma);
1563 			goto out;
1564 		}
1565 
1566 		err = reserve_gtt_with_resource(vma, total);
1567 		if (err) {
1568 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1569 			       total, ggtt->vm.total, err);
1570 			goto out;
1571 		}
1572 		track_vma_bind(vma);
1573 
1574 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1575 		if (vma->node.start != total ||
1576 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1577 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1578 			       vma->node.start, vma->node.size,
1579 			       total, 2*I915_GTT_PAGE_SIZE);
1580 			err = -EINVAL;
1581 			goto out;
1582 		}
1583 	}
1584 
1585 	/* Now we start forcing evictions */
1586 	for (total = I915_GTT_PAGE_SIZE;
1587 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1588 	     total += 2 * I915_GTT_PAGE_SIZE) {
1589 		struct i915_vma *vma;
1590 
1591 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1592 						      2 * PAGE_SIZE);
1593 		if (IS_ERR(obj)) {
1594 			err = PTR_ERR(obj);
1595 			goto out;
1596 		}
1597 
1598 		err = i915_gem_object_pin_pages_unlocked(obj);
1599 		if (err) {
1600 			i915_gem_object_put(obj);
1601 			goto out;
1602 		}
1603 
1604 		list_add(&obj->st_link, &objects);
1605 
1606 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1607 		if (IS_ERR(vma)) {
1608 			err = PTR_ERR(vma);
1609 			goto out;
1610 		}
1611 
1612 		err = reserve_gtt_with_resource(vma, total);
1613 		if (err) {
1614 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1615 			       total, ggtt->vm.total, err);
1616 			goto out;
1617 		}
1618 		track_vma_bind(vma);
1619 
1620 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1621 		if (vma->node.start != total ||
1622 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1623 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1624 			       vma->node.start, vma->node.size,
1625 			       total, 2*I915_GTT_PAGE_SIZE);
1626 			err = -EINVAL;
1627 			goto out;
1628 		}
1629 	}
1630 
1631 	/* And then try at random */
1632 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1633 		struct i915_vma *vma;
1634 		u64 offset;
1635 
1636 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1637 		if (IS_ERR(vma)) {
1638 			err = PTR_ERR(vma);
1639 			goto out;
1640 		}
1641 
1642 		err = i915_vma_unbind_unlocked(vma);
1643 		if (err) {
1644 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1645 			goto out;
1646 		}
1647 
1648 		offset = igt_random_offset(&prng,
1649 					   0, ggtt->vm.total,
1650 					   2 * I915_GTT_PAGE_SIZE,
1651 					   I915_GTT_MIN_ALIGNMENT);
1652 
1653 		err = reserve_gtt_with_resource(vma, offset);
1654 		if (err) {
1655 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1656 			       total, ggtt->vm.total, err);
1657 			goto out;
1658 		}
1659 		track_vma_bind(vma);
1660 
1661 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1662 		if (vma->node.start != offset ||
1663 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1664 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1665 			       vma->node.start, vma->node.size,
1666 			       offset, 2*I915_GTT_PAGE_SIZE);
1667 			err = -EINVAL;
1668 			goto out;
1669 		}
1670 	}
1671 
1672 out:
1673 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1674 		i915_gem_object_unpin_pages(obj);
1675 		i915_gem_object_put(obj);
1676 	}
1677 	return err;
1678 }
1679 
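/*
 * Insert @vma anywhere within the GTT (evicting if necessary) under the
 * vm mutex, attaching a freshly allocated vma_resource on success.
 */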
1680 static int insert_gtt_with_resource(struct i915_vma *vma)
1681 {
1682 	struct i915_address_space *vm = vma->vm;
1683 	struct i915_vma_resource *vma_res;
1684 	struct drm_i915_gem_object *obj = vma->obj;
1685 	int err;
1686 
1687 	vma_res = i915_vma_resource_alloc();
1688 	if (IS_ERR(vma_res))
1689 		return PTR_ERR(vma_res);
1690 
1691 	mutex_lock(&vm->mutex);
1692 	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
1693 				  obj->cache_level, 0, vm->total, 0);
1694 	if (!err) {
1695 		i915_vma_resource_init_from_vma(vma_res, vma);
1696 		vma->resource = vma_res;
1697 	} else {
1698 		kfree(vma_res);
1699 	}
1700 	mutex_unlock(&vm->mutex);
1701 
1702 	return err;
1703 }
1704 
1705 static int igt_gtt_insert(void *arg)
1706 {
1707 	struct i915_ggtt *ggtt = arg;
1708 	struct drm_i915_gem_object *obj, *on;
1709 	struct drm_mm_node tmp = {};
1710 	const struct invalid_insert {
1711 		u64 size;
1712 		u64 alignment;
1713 		u64 start, end;
1714 	} invalid_insert[] = {
1715 		{
1716 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1717 			0, ggtt->vm.total,
1718 		},
1719 		{
1720 			2*I915_GTT_PAGE_SIZE, 0,
1721 			0, I915_GTT_PAGE_SIZE,
1722 		},
1723 		{
1724 			-(u64)I915_GTT_PAGE_SIZE, 0,
1725 			0, 4*I915_GTT_PAGE_SIZE,
1726 		},
1727 		{
1728 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1729 			0, 4*I915_GTT_PAGE_SIZE,
1730 		},
1731 		{
1732 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1733 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1734 		},
1735 		{}
1736 	}, *ii;
1737 	LIST_HEAD(objects);
1738 	u64 total;
1739 	int err = -ENODEV;
1740 
1741 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1742 	 * for the node, evicting if required.
1743 	 */
1744 
1745 	/* Check a couple of obviously invalid requests */
1746 	for (ii = invalid_insert; ii->size; ii++) {
1747 		mutex_lock(&ggtt->vm.mutex);
1748 		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
1749 					  ii->size, ii->alignment,
1750 					  I915_COLOR_UNEVICTABLE,
1751 					  ii->start, ii->end,
1752 					  0);
1753 		mutex_unlock(&ggtt->vm.mutex);
1754 		if (err != -ENOSPC) {
1755 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1756 			       ii->size, ii->alignment, ii->start, ii->end,
1757 			       err);
1758 			return -EINVAL;
1759 		}
1760 	}
1761 
1762 	/* Start by filling the GGTT */
1763 	for (total = 0;
1764 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1765 	     total += I915_GTT_PAGE_SIZE) {
1766 		struct i915_vma *vma;
1767 
1768 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1769 						      I915_GTT_PAGE_SIZE);
1770 		if (IS_ERR(obj)) {
1771 			err = PTR_ERR(obj);
1772 			goto out;
1773 		}
1774 
1775 		err = i915_gem_object_pin_pages_unlocked(obj);
1776 		if (err) {
1777 			i915_gem_object_put(obj);
1778 			goto out;
1779 		}
1780 
1781 		list_add(&obj->st_link, &objects);
1782 
1783 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1784 		if (IS_ERR(vma)) {
1785 			err = PTR_ERR(vma);
1786 			goto out;
1787 		}
1788 
1789 		err = insert_gtt_with_resource(vma);
1790 		if (err == -ENOSPC) {
1791 			/* maxed out the GGTT space */
1792 			i915_gem_object_put(obj);
1793 			break;
1794 		}
1795 		if (err) {
1796 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1797 			       total, ggtt->vm.total, err);
1798 			goto out;
1799 		}
1800 		track_vma_bind(vma);
1801 		__i915_vma_pin(vma);
1802 
1803 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1804 	}
1805 
1806 	list_for_each_entry(obj, &objects, st_link) {
1807 		struct i915_vma *vma;
1808 
1809 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1810 		if (IS_ERR(vma)) {
1811 			err = PTR_ERR(vma);
1812 			goto out;
1813 		}
1814 
1815 		if (!drm_mm_node_allocated(&vma->node)) {
1816 			pr_err("VMA was unexpectedly evicted!\n");
1817 			err = -EINVAL;
1818 			goto out;
1819 		}
1820 
1821 		__i915_vma_unpin(vma);
1822 	}
1823 
1824 	/* If we then reinsert, we should find the same hole */
1825 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1826 		struct i915_vma *vma;
1827 		u64 offset;
1828 
1829 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1830 		if (IS_ERR(vma)) {
1831 			err = PTR_ERR(vma);
1832 			goto out;
1833 		}
1834 
1835 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1836 		offset = vma->node.start;
1837 
1838 		err = i915_vma_unbind_unlocked(vma);
1839 		if (err) {
1840 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1841 			goto out;
1842 		}
1843 
1844 		err = insert_gtt_with_resource(vma);
1845 		if (err) {
1846 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1847 			       total, ggtt->vm.total, err);
1848 			goto out;
1849 		}
1850 		track_vma_bind(vma);
1851 
1852 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1853 		if (vma->node.start != offset) {
1854 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1855 			       offset, vma->node.start);
1856 			err = -EINVAL;
1857 			goto out;
1858 		}
1859 	}
1860 
1861 	/* And then force evictions */
1862 	for (total = 0;
1863 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1864 	     total += 2 * I915_GTT_PAGE_SIZE) {
1865 		struct i915_vma *vma;
1866 
1867 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1868 						      2 * I915_GTT_PAGE_SIZE);
1869 		if (IS_ERR(obj)) {
1870 			err = PTR_ERR(obj);
1871 			goto out;
1872 		}
1873 
1874 		err = i915_gem_object_pin_pages_unlocked(obj);
1875 		if (err) {
1876 			i915_gem_object_put(obj);
1877 			goto out;
1878 		}
1879 
1880 		list_add(&obj->st_link, &objects);
1881 
1882 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1883 		if (IS_ERR(vma)) {
1884 			err = PTR_ERR(vma);
1885 			goto out;
1886 		}
1887 
1888 		err = insert_gtt_with_resource(vma);
1889 		if (err) {
1890 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1891 			       total, ggtt->vm.total, err);
1892 			goto out;
1893 		}
1894 		track_vma_bind(vma);
1895 
1896 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1897 	}
1898 
1899 out:
1900 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1901 		i915_gem_object_unpin_pages(obj);
1902 		i915_gem_object_put(obj);
1903 	}
1904 	return err;
1905 }
1906 
1907 int i915_gem_gtt_mock_selftests(void)
1908 {
1909 	static const struct i915_subtest tests[] = {
1910 		SUBTEST(igt_mock_drunk),
1911 		SUBTEST(igt_mock_walk),
1912 		SUBTEST(igt_mock_pot),
1913 		SUBTEST(igt_mock_fill),
1914 		SUBTEST(igt_gtt_reserve),
1915 		SUBTEST(igt_gtt_insert),
1916 	};
1917 	struct drm_i915_private *i915;
1918 	struct intel_gt *gt;
1919 	int err;
1920 
1921 	i915 = mock_gem_device();
1922 	if (!i915)
1923 		return -ENOMEM;
1924 
1925 	/* allocate the ggtt */
1926 	err = intel_gt_assign_ggtt(to_gt(i915));
1927 	if (err)
1928 		goto out_put;
1929 
1930 	gt = to_gt(i915);
1931 
1932 	mock_init_ggtt(gt);
1933 
1934 	err = i915_subtests(tests, gt->ggtt);
1935 
1936 	mock_device_flush(i915);
1937 	i915_gem_drain_freed_objects(i915);
1938 	mock_fini_ggtt(gt->ggtt);
1939 
1940 out_put:
1941 	mock_destroy_device(i915);
1942 	return err;
1943 }
1944 
1945 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1946 {
1947 	static const struct i915_subtest tests[] = {
1948 		SUBTEST(igt_ppgtt_alloc),
1949 		SUBTEST(igt_ppgtt_lowlevel),
1950 		SUBTEST(igt_ppgtt_drunk),
1951 		SUBTEST(igt_ppgtt_walk),
1952 		SUBTEST(igt_ppgtt_pot),
1953 		SUBTEST(igt_ppgtt_fill),
1954 		SUBTEST(igt_ppgtt_shrink),
1955 		SUBTEST(igt_ppgtt_shrink_boom),
1956 		SUBTEST(igt_ppgtt_misaligned_pin),
1957 		SUBTEST(igt_ggtt_lowlevel),
1958 		SUBTEST(igt_ggtt_drunk),
1959 		SUBTEST(igt_ggtt_walk),
1960 		SUBTEST(igt_ggtt_pot),
1961 		SUBTEST(igt_ggtt_fill),
1962 		SUBTEST(igt_ggtt_page),
1963 		SUBTEST(igt_ggtt_misaligned_pin),
1964 	};
1965 
1966 	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
1967 
1968 	return i915_live_subtests(tests, i915);
1969 }
1970