xref: /openbmc/linux/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c (revision f79e4d5f92a129a1159c973735007d4ddc8541f3)
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "../i915_selftest.h"
29 #include "i915_random.h"
30 
31 #include "mock_context.h"
32 #include "mock_drm.h"
33 #include "mock_gem_device.h"
34 
35 static void fake_free_pages(struct drm_i915_gem_object *obj,
36 			    struct sg_table *pages)
37 {
38 	sg_free_table(pages);
39 	kfree(pages);
40 }
41 
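/*
 * Provide "backing storage" without allocating any real memory: every
 * scatterlist entry points at the same bogus pfn (PFN_BIAS) and carries a
 * fabricated dma address, in chunks of at most BIT(31) bytes. For example,
 * a 3G object is described by two entries of 2G + 1G. The object is marked
 * I915_MADV_DONTNEED so the shrinker may simply discard the fake pages.
 */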
42 static int fake_get_pages(struct drm_i915_gem_object *obj)
43 {
44 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
45 #define PFN_BIAS 0x1000
46 	struct sg_table *pages;
47 	struct scatterlist *sg;
48 	unsigned int sg_page_sizes;
49 	typeof(obj->base.size) rem;
50 
51 	pages = kmalloc(sizeof(*pages), GFP);
52 	if (!pages)
53 		return -ENOMEM;
54 
55 	rem = round_up(obj->base.size, BIT(31)) >> 31;
56 	if (sg_alloc_table(pages, rem, GFP)) {
57 		kfree(pages);
58 		return -ENOMEM;
59 	}
60 
61 	sg_page_sizes = 0;
62 	rem = obj->base.size;
63 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
64 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
65 
66 		GEM_BUG_ON(!len);
67 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
68 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
69 		sg_dma_len(sg) = len;
70 		sg_page_sizes |= len;
71 
72 		rem -= len;
73 	}
74 	GEM_BUG_ON(rem);
75 
76 	obj->mm.madv = I915_MADV_DONTNEED;
77 
78 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
79 
80 	return 0;
81 #undef GFP
82 }
83 
84 static void fake_put_pages(struct drm_i915_gem_object *obj,
85 			   struct sg_table *pages)
86 {
87 	fake_free_pages(obj, pages);
88 	obj->mm.dirty = false;
89 	obj->mm.madv = I915_MADV_WILLNEED;
90 }
91 
92 static const struct drm_i915_gem_object_ops fake_ops = {
93 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
94 	.get_pages = fake_get_pages,
95 	.put_pages = fake_put_pages,
96 };
97 
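/*
 * Wrap the fake backing store above in a GEM object of the requested size.
 * The size must be page aligned and small enough to be representable in
 * obj->base.size; the pages are pinned once up front to preallocate the
 * fake sg_table.
 */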
98 static struct drm_i915_gem_object *
99 fake_dma_object(struct drm_i915_private *i915, u64 size)
100 {
101 	struct drm_i915_gem_object *obj;
102 
103 	GEM_BUG_ON(!size);
104 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
105 
106 	if (overflows_type(size, obj->base.size))
107 		return ERR_PTR(-E2BIG);
108 
109 	obj = i915_gem_object_alloc(i915);
110 	if (!obj)
111 		goto err;
112 
113 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
114 	i915_gem_object_init(obj, &fake_ops);
115 
116 	obj->write_domain = I915_GEM_DOMAIN_CPU;
117 	obj->read_domains = I915_GEM_DOMAIN_CPU;
118 	obj->cache_level = I915_CACHE_NONE;
119 
120 	/* Preallocate the "backing storage" */
121 	if (i915_gem_object_pin_pages(obj))
122 		goto err_obj;
123 
124 	i915_gem_object_unpin_pages(obj);
125 	return obj;
126 
127 err_obj:
128 	i915_gem_object_put(obj);
129 err:
130 	return ERR_PTR(-ENOMEM);
131 }
132 
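/*
 * First sanity check for the ppgtt: construct one by hand and ask
 * allocate_va_range() to back the address range, first in one go with the
 * request growing by 4x each pass, then incrementally. Running out of
 * memory for the page directories is tolerated, as the virtual range may
 * simply be too large for the test machine.
 */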
133 static int igt_ppgtt_alloc(void *arg)
134 {
135 	struct drm_i915_private *dev_priv = arg;
136 	struct i915_hw_ppgtt *ppgtt;
137 	u64 size, last;
138 	int err;
139 
140 	/* Allocate a ppgtt and try to fill the entire range */
141 
142 	if (!USES_PPGTT(dev_priv))
143 		return 0;
144 
145 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
146 	if (!ppgtt)
147 		return -ENOMEM;
148 
149 	mutex_lock(&dev_priv->drm.struct_mutex);
150 	err = __hw_ppgtt_init(ppgtt, dev_priv);
151 	if (err)
152 		goto err_ppgtt;
153 
154 	if (!ppgtt->base.allocate_va_range)
155 		goto err_ppgtt_cleanup;
156 
157 	/* Check we can allocate the entire range */
158 	for (size = 4096;
159 	     size <= ppgtt->base.total;
160 	     size <<= 2) {
161 		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
162 		if (err) {
163 			if (err == -ENOMEM) {
164 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
165 					size, ilog2(size));
166 				err = 0; /* virtual space too large! */
167 			}
168 			goto err_ppgtt_cleanup;
169 		}
170 
171 		ppgtt->base.clear_range(&ppgtt->base, 0, size);
172 	}
173 
174 	/* Check we can incrementally allocate the entire range */
175 	for (last = 0, size = 4096;
176 	     size <= ppgtt->base.total;
177 	     last = size, size <<= 2) {
178 		err = ppgtt->base.allocate_va_range(&ppgtt->base,
179 						    last, size - last);
180 		if (err) {
181 			if (err == -ENOMEM) {
182 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
183 					last, size - last, ilog2(size));
184 				err = 0; /* virtual space too large! */
185 			}
186 			goto err_ppgtt_cleanup;
187 		}
188 	}
189 
190 err_ppgtt_cleanup:
191 	ppgtt->base.cleanup(&ppgtt->base);
192 err_ppgtt:
193 	mutex_unlock(&dev_priv->drm.struct_mutex);
194 	kfree(ppgtt);
195 	return err;
196 }
197 
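/*
 * Exercise the low-level vm hooks directly: allocate_va_range,
 * insert_entries and clear_range are called with a mock vma built on the
 * stack, bypassing the usual i915_vma machinery, for objects of every
 * power-of-two size that fits in the hole, placed in a random order.
 */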
198 static int lowlevel_hole(struct drm_i915_private *i915,
199 			 struct i915_address_space *vm,
200 			 u64 hole_start, u64 hole_end,
201 			 unsigned long end_time)
202 {
203 	I915_RND_STATE(seed_prng);
204 	unsigned int size;
205 	struct i915_vma mock_vma;
206 
207 	memset(&mock_vma, 0, sizeof(struct i915_vma));
208 
209 	/* Keep creating larger objects until one cannot fit into the hole */
210 	for (size = 12; (hole_end - hole_start) >> size; size++) {
211 		I915_RND_SUBSTATE(prng, seed_prng);
212 		struct drm_i915_gem_object *obj;
213 		unsigned int *order, count, n;
214 		u64 hole_size;
215 
216 		hole_size = (hole_end - hole_start) >> size;
217 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
218 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
219 		count = hole_size >> 1;
220 		if (!count) {
221 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
222 				 __func__, hole_start, hole_end, size, hole_size);
223 			break;
224 		}
225 
226 		do {
227 			order = i915_random_order(count, &prng);
228 			if (order)
229 				break;
230 		} while (count >>= 1);
231 		if (!count)
232 			return -ENOMEM;
233 		GEM_BUG_ON(!order);
234 
235 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
236 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
237 
238 		/* Ignore allocation failures (i.e. don't report them as
239 		 * a test failure) as we are purposefully allocating very
240 		 * large objects without checking that we have sufficient
241 		 * memory. We expect to hit -ENOMEM.
242 		 */
243 
244 		obj = fake_dma_object(i915, BIT_ULL(size));
245 		if (IS_ERR(obj)) {
246 			kfree(order);
247 			break;
248 		}
249 
250 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
251 
252 		if (i915_gem_object_pin_pages(obj)) {
253 			i915_gem_object_put(obj);
254 			kfree(order);
255 			break;
256 		}
257 
258 		for (n = 0; n < count; n++) {
259 			u64 addr = hole_start + order[n] * BIT_ULL(size);
260 
261 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
262 
263 			if (igt_timeout(end_time,
264 					"%s timed out before %d/%d\n",
265 					__func__, n, count)) {
266 				hole_end = hole_start; /* quit */
267 				break;
268 			}
269 
270 			if (vm->allocate_va_range &&
271 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
272 				break;
273 
274 			mock_vma.pages = obj->mm.pages;
275 			mock_vma.node.size = BIT_ULL(size);
276 			mock_vma.node.start = addr;
277 
278 			intel_runtime_pm_get(i915);
279 			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
280 			intel_runtime_pm_put(i915);
281 		}
282 		count = n;
283 
284 		i915_random_reorder(order, count, &prng);
285 		for (n = 0; n < count; n++) {
286 			u64 addr = hole_start + order[n] * BIT_ULL(size);
287 
288 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
289 			vm->clear_range(vm, addr, BIT_ULL(size));
290 		}
291 
292 		i915_gem_object_unpin_pages(obj);
293 		i915_gem_object_put(obj);
294 
295 		kfree(order);
296 	}
297 
298 	return 0;
299 }
300 
301 static void close_object_list(struct list_head *objects,
302 			      struct i915_address_space *vm)
303 {
304 	struct drm_i915_gem_object *obj, *on;
305 	int ignored;
306 
307 	list_for_each_entry_safe(obj, on, objects, st_link) {
308 		struct i915_vma *vma;
309 
310 		vma = i915_vma_instance(obj, vm, NULL);
311 		if (!IS_ERR(vma))
312 			ignored = i915_vma_unbind(vma);
313 		/* Only ppgtt vma may be closed before the object is freed */
314 		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
315 			i915_vma_close(vma);
316 
317 		list_del(&obj->st_link);
318 		i915_gem_object_put(obj);
319 	}
320 }
321 
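/*
 * For each prime step, build up a list of objects (npages growing by that
 * prime each iteration) and pack them against either end of the hole
 * ("top-down" from hole_end, "bottom-up" from hole_start), checking after
 * each pin that the node landed exactly where requested, that it stays put
 * until explicitly unbound, and that we never walk off the edge of the hole.
 */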
322 static int fill_hole(struct drm_i915_private *i915,
323 		     struct i915_address_space *vm,
324 		     u64 hole_start, u64 hole_end,
325 		     unsigned long end_time)
326 {
327 	const u64 hole_size = hole_end - hole_start;
328 	struct drm_i915_gem_object *obj;
329 	const unsigned long max_pages =
330 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
331 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
332 	unsigned long npages, prime, flags;
333 	struct i915_vma *vma;
334 	LIST_HEAD(objects);
335 	int err;
336 
337 	/* Try binding many VMA working inwards from either edge */
338 
339 	flags = PIN_OFFSET_FIXED | PIN_USER;
340 	if (i915_is_ggtt(vm))
341 		flags |= PIN_GLOBAL;
342 
343 	for_each_prime_number_from(prime, 2, max_step) {
344 		for (npages = 1; npages <= max_pages; npages *= prime) {
345 			const u64 full_size = npages << PAGE_SHIFT;
346 			const struct {
347 				const char *name;
348 				u64 offset;
349 				int step;
350 			} phases[] = {
351 				{ "top-down", hole_end, -1, },
352 				{ "bottom-up", hole_start, 1, },
353 				{ }
354 			}, *p;
355 
356 			obj = fake_dma_object(i915, full_size);
357 			if (IS_ERR(obj))
358 				break;
359 
360 			list_add(&obj->st_link, &objects);
361 
362 			/* Align differing sized objects against the edges, and
363 			 * check we don't walk off into the void when binding
364 			 * them into the GTT.
365 			 */
366 			for (p = phases; p->name; p++) {
367 				u64 offset;
368 
369 				offset = p->offset;
370 				list_for_each_entry(obj, &objects, st_link) {
371 					vma = i915_vma_instance(obj, vm, NULL);
372 					if (IS_ERR(vma))
373 						continue;
374 
375 					if (p->step < 0) {
376 						if (offset < hole_start + obj->base.size)
377 							break;
378 						offset -= obj->base.size;
379 					}
380 
381 					err = i915_vma_pin(vma, 0, 0, offset | flags);
382 					if (err) {
383 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
384 						       __func__, p->name, err, npages, prime, offset);
385 						goto err;
386 					}
387 
388 					if (!drm_mm_node_allocated(&vma->node) ||
389 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
390 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
391 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
392 						       offset);
393 						err = -EINVAL;
394 						goto err;
395 					}
396 
397 					i915_vma_unpin(vma);
398 
399 					if (p->step > 0) {
400 						if (offset + obj->base.size > hole_end)
401 							break;
402 						offset += obj->base.size;
403 					}
404 				}
405 
406 				offset = p->offset;
407 				list_for_each_entry(obj, &objects, st_link) {
408 					vma = i915_vma_instance(obj, vm, NULL);
409 					if (IS_ERR(vma))
410 						continue;
411 
412 					if (p->step < 0) {
413 						if (offset < hole_start + obj->base.size)
414 							break;
415 						offset -= obj->base.size;
416 					}
417 
418 					if (!drm_mm_node_allocated(&vma->node) ||
419 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
420 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
421 						       __func__, p->name, vma->node.start, vma->node.size,
422 						       offset);
423 						err = -EINVAL;
424 						goto err;
425 					}
426 
427 					err = i915_vma_unbind(vma);
428 					if (err) {
429 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
430 						       __func__, p->name, vma->node.start, vma->node.size,
431 						       err);
432 						goto err;
433 					}
434 
435 					if (p->step > 0) {
436 						if (offset + obj->base.size > hole_end)
437 							break;
438 						offset += obj->base.size;
439 					}
440 				}
441 
442 				offset = p->offset;
443 				list_for_each_entry_reverse(obj, &objects, st_link) {
444 					vma = i915_vma_instance(obj, vm, NULL);
445 					if (IS_ERR(vma))
446 						continue;
447 
448 					if (p->step < 0) {
449 						if (offset < hole_start + obj->base.size)
450 							break;
451 						offset -= obj->base.size;
452 					}
453 
454 					err = i915_vma_pin(vma, 0, 0, offset | flags);
455 					if (err) {
456 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
457 						       __func__, p->name, err, npages, prime, offset);
458 						goto err;
459 					}
460 
461 					if (!drm_mm_node_allocated(&vma->node) ||
462 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
463 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
464 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
465 						       offset);
466 						err = -EINVAL;
467 						goto err;
468 					}
469 
470 					i915_vma_unpin(vma);
471 
472 					if (p->step > 0) {
473 						if (offset + obj->base.size > hole_end)
474 							break;
475 						offset += obj->base.size;
476 					}
477 				}
478 
479 				offset = p->offset;
480 				list_for_each_entry_reverse(obj, &objects, st_link) {
481 					vma = i915_vma_instance(obj, vm, NULL);
482 					if (IS_ERR(vma))
483 						continue;
484 
485 					if (p->step < 0) {
486 						if (offset < hole_start + obj->base.size)
487 							break;
488 						offset -= obj->base.size;
489 					}
490 
491 					if (!drm_mm_node_allocated(&vma->node) ||
492 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
493 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
494 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
495 						       offset);
496 						err = -EINVAL;
497 						goto err;
498 					}
499 
500 					err = i915_vma_unbind(vma);
501 					if (err) {
502 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
503 						       __func__, p->name, vma->node.start, vma->node.size,
504 						       err);
505 						goto err;
506 					}
507 
508 					if (p->step > 0) {
509 						if (offset + obj->base.size > hole_end)
510 							break;
511 						offset += obj->base.size;
512 					}
513 				}
514 			}
515 
516 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
517 					__func__, npages, prime)) {
518 				err = -EINTR;
519 				goto err;
520 			}
521 		}
522 
523 		close_object_list(&objects, vm);
524 	}
525 
526 	return 0;
527 
528 err:
529 	close_object_list(&objects, vm);
530 	return err;
531 }
532 
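/*
 * Bind a single vma (of every prime number of pages that fits) at each
 * successive offset within the hole, checking that the node is placed
 * exactly where requested and is released again on unbind.
 */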
533 static int walk_hole(struct drm_i915_private *i915,
534 		     struct i915_address_space *vm,
535 		     u64 hole_start, u64 hole_end,
536 		     unsigned long end_time)
537 {
538 	const u64 hole_size = hole_end - hole_start;
539 	const unsigned long max_pages =
540 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
541 	unsigned long flags;
542 	u64 size;
543 
544 	/* Try binding a single VMA in different positions within the hole */
545 
546 	flags = PIN_OFFSET_FIXED | PIN_USER;
547 	if (i915_is_ggtt(vm))
548 		flags |= PIN_GLOBAL;
549 
550 	for_each_prime_number_from(size, 1, max_pages) {
551 		struct drm_i915_gem_object *obj;
552 		struct i915_vma *vma;
553 		u64 addr;
554 		int err = 0;
555 
556 		obj = fake_dma_object(i915, size << PAGE_SHIFT);
557 		if (IS_ERR(obj))
558 			break;
559 
560 		vma = i915_vma_instance(obj, vm, NULL);
561 		if (IS_ERR(vma)) {
562 			err = PTR_ERR(vma);
563 			goto err_put;
564 		}
565 
566 		for (addr = hole_start;
567 		     addr + obj->base.size < hole_end;
568 		     addr += obj->base.size) {
569 			err = i915_vma_pin(vma, 0, 0, addr | flags);
570 			if (err) {
571 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
572 				       __func__, addr, vma->size,
573 				       hole_start, hole_end, err);
574 				goto err_close;
575 			}
576 			i915_vma_unpin(vma);
577 
578 			if (!drm_mm_node_allocated(&vma->node) ||
579 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
580 				pr_err("%s incorrect at %llx + %llx\n",
581 				       __func__, addr, vma->size);
582 				err = -EINVAL;
583 				goto err_close;
584 			}
585 
586 			err = i915_vma_unbind(vma);
587 			if (err) {
588 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
589 				       __func__, addr, vma->size, err);
590 				goto err_close;
591 			}
592 
593 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
594 
595 			if (igt_timeout(end_time,
596 					"%s timed out at %llx\n",
597 					__func__, addr)) {
598 				err = -EINTR;
599 				goto err_close;
600 			}
601 		}
602 
603 err_close:
604 		if (!i915_vma_is_ggtt(vma))
605 			i915_vma_close(vma);
606 err_put:
607 		i915_gem_object_put(obj);
608 		if (err)
609 			return err;
610 	}
611 
612 	return 0;
613 }
614 
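/*
 * Pin a two page object so that it straddles every power-of-two boundary
 * inside the hole: for each pot the node starts one page below a multiple
 * of BIT(pot), so its two pages bracket the boundary (e.g. for pot=30,
 * every 1G mark within the hole).
 */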
615 static int pot_hole(struct drm_i915_private *i915,
616 		    struct i915_address_space *vm,
617 		    u64 hole_start, u64 hole_end,
618 		    unsigned long end_time)
619 {
620 	struct drm_i915_gem_object *obj;
621 	struct i915_vma *vma;
622 	unsigned long flags;
623 	unsigned int pot;
624 	int err = 0;
625 
626 	flags = PIN_OFFSET_FIXED | PIN_USER;
627 	if (i915_is_ggtt(vm))
628 		flags |= PIN_GLOBAL;
629 
630 	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
631 	if (IS_ERR(obj))
632 		return PTR_ERR(obj);
633 
634 	vma = i915_vma_instance(obj, vm, NULL);
635 	if (IS_ERR(vma)) {
636 		err = PTR_ERR(vma);
637 		goto err_obj;
638 	}
639 
640 	/* Insert a pair of pages across every pot boundary within the hole */
641 	for (pot = fls64(hole_end - 1) - 1;
642 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
643 	     pot--) {
644 		u64 step = BIT_ULL(pot);
645 		u64 addr;
646 
647 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
648 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
649 		     addr += step) {
650 			err = i915_vma_pin(vma, 0, 0, addr | flags);
651 			if (err) {
652 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
653 				       __func__,
654 				       addr,
655 				       hole_start, hole_end,
656 				       err);
657 				goto err;
658 			}
659 
660 			if (!drm_mm_node_allocated(&vma->node) ||
661 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
662 				pr_err("%s incorrect at %llx + %llx\n",
663 				       __func__, addr, vma->size);
664 				i915_vma_unpin(vma);
665 				err = i915_vma_unbind(vma);
666 				err = -EINVAL;
667 				goto err;
668 			}
669 
670 			i915_vma_unpin(vma);
671 			err = i915_vma_unbind(vma);
672 			GEM_BUG_ON(err);
673 		}
674 
675 		if (igt_timeout(end_time,
676 				"%s timed out after %d/%d\n",
677 				__func__, pot, fls64(hole_end - 1) - 1)) {
678 			err = -EINTR;
679 			goto err;
680 		}
681 	}
682 
683 err:
684 	if (!i915_vma_is_ggtt(vma))
685 		i915_vma_close(vma);
686 err_obj:
687 	i915_gem_object_put(obj);
688 	return err;
689 }
690 
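/*
 * The same idea as lowlevel_hole, but going through the full
 * i915_vma_pin/i915_vma_unbind path: for every power-of-two object size
 * that fits, pin and unbind the vma at each slot of the hole in a random
 * order, checking placement each time.
 */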
691 static int drunk_hole(struct drm_i915_private *i915,
692 		      struct i915_address_space *vm,
693 		      u64 hole_start, u64 hole_end,
694 		      unsigned long end_time)
695 {
696 	I915_RND_STATE(prng);
697 	unsigned int size;
698 	unsigned long flags;
699 
700 	flags = PIN_OFFSET_FIXED | PIN_USER;
701 	if (i915_is_ggtt(vm))
702 		flags |= PIN_GLOBAL;
703 
704 	/* Keep creating larger objects until one cannot fit into the hole */
705 	for (size = 12; (hole_end - hole_start) >> size; size++) {
706 		struct drm_i915_gem_object *obj;
707 		unsigned int *order, count, n;
708 		struct i915_vma *vma;
709 		u64 hole_size;
710 		int err = -ENODEV;
711 
712 		hole_size = (hole_end - hole_start) >> size;
713 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
714 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
715 		count = hole_size >> 1;
716 		if (!count) {
717 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
718 				 __func__, hole_start, hole_end, size, hole_size);
719 			break;
720 		}
721 
722 		do {
723 			order = i915_random_order(count, &prng);
724 			if (order)
725 				break;
726 		} while (count >>= 1);
727 		if (!count)
728 			return -ENOMEM;
729 		GEM_BUG_ON(!order);
730 
731 		/* Ignore allocation failures (i.e. don't report them as
732 		 * a test failure) as we are purposefully allocating very
733 		 * large objects without checking that we have sufficient
734 		 * memory. We expect to hit -ENOMEM.
735 		 */
736 
737 		obj = fake_dma_object(i915, BIT_ULL(size));
738 		if (IS_ERR(obj)) {
739 			kfree(order);
740 			break;
741 		}
742 
743 		vma = i915_vma_instance(obj, vm, NULL);
744 		if (IS_ERR(vma)) {
745 			err = PTR_ERR(vma);
746 			goto err_obj;
747 		}
748 
749 		GEM_BUG_ON(vma->size != BIT_ULL(size));
750 
751 		for (n = 0; n < count; n++) {
752 			u64 addr = hole_start + order[n] * BIT_ULL(size);
753 
754 			err = i915_vma_pin(vma, 0, 0, addr | flags);
755 			if (err) {
756 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
757 				       __func__,
758 				       addr, BIT_ULL(size),
759 				       hole_start, hole_end,
760 				       err);
761 				goto err;
762 			}
763 
764 			if (!drm_mm_node_allocated(&vma->node) ||
765 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
766 				pr_err("%s incorrect at %llx + %llx\n",
767 				       __func__, addr, BIT_ULL(size));
768 				i915_vma_unpin(vma);
769 				err = i915_vma_unbind(vma);
770 				err = -EINVAL;
771 				goto err;
772 			}
773 
774 			i915_vma_unpin(vma);
775 			err = i915_vma_unbind(vma);
776 			GEM_BUG_ON(err);
777 
778 			if (igt_timeout(end_time,
779 					"%s timed out after %d/%d\n",
780 					__func__, n, count)) {
781 				err = -EINTR;
782 				goto err;
783 			}
784 		}
785 
786 err:
787 		if (!i915_vma_is_ggtt(vma))
788 			i915_vma_close(vma);
789 err_obj:
790 		i915_gem_object_put(obj);
791 		kfree(order);
792 		if (err)
793 			return err;
794 	}
795 
796 	return 0;
797 }
798 
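/*
 * Walk across the hole, pinning objects of doubling size back to back
 * until the hole is full. shrink_hole() below runs this with the vm's
 * fault injection enabled so that internal allocations fail at prime
 * intervals, exercising the error unwind paths.
 */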
799 static int __shrink_hole(struct drm_i915_private *i915,
800 			 struct i915_address_space *vm,
801 			 u64 hole_start, u64 hole_end,
802 			 unsigned long end_time)
803 {
804 	struct drm_i915_gem_object *obj;
805 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
806 	unsigned int order = 12;
807 	LIST_HEAD(objects);
808 	int err = 0;
809 	u64 addr;
810 
811 	/* Fill the hole with objects of doubling size, clamped to the space remaining */
812 	for (addr = hole_start; addr < hole_end; ) {
813 		struct i915_vma *vma;
814 		u64 size = BIT_ULL(order++);
815 
816 		size = min(size, hole_end - addr);
817 		obj = fake_dma_object(i915, size);
818 		if (IS_ERR(obj)) {
819 			err = PTR_ERR(obj);
820 			break;
821 		}
822 
823 		list_add(&obj->st_link, &objects);
824 
825 		vma = i915_vma_instance(obj, vm, NULL);
826 		if (IS_ERR(vma)) {
827 			err = PTR_ERR(vma);
828 			break;
829 		}
830 
831 		GEM_BUG_ON(vma->size != size);
832 
833 		err = i915_vma_pin(vma, 0, 0, addr | flags);
834 		if (err) {
835 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
836 			       __func__, addr, size, hole_start, hole_end, err);
837 			break;
838 		}
839 
840 		if (!drm_mm_node_allocated(&vma->node) ||
841 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
842 			pr_err("%s incorrect at %llx + %llx\n",
843 			       __func__, addr, size);
844 			i915_vma_unpin(vma);
845 			err = i915_vma_unbind(vma);
846 			err = -EINVAL;
847 			break;
848 		}
849 
850 		i915_vma_unpin(vma);
851 		addr += size;
852 
853 		if (igt_timeout(end_time,
854 				"%s timed out at offset %llx [%llx - %llx]\n",
855 				__func__, addr, hole_start, hole_end)) {
856 			err = -EINTR;
857 			break;
858 		}
859 	}
860 
861 	close_object_list(&objects, vm);
862 	return err;
863 }
864 
865 static int shrink_hole(struct drm_i915_private *i915,
866 		       struct i915_address_space *vm,
867 		       u64 hole_start, u64 hole_end,
868 		       unsigned long end_time)
869 {
870 	unsigned long prime;
871 	int err;
872 
873 	vm->fault_attr.probability = 999;
874 	atomic_set(&vm->fault_attr.times, -1);
875 
876 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
877 		vm->fault_attr.interval = prime;
878 		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
879 		if (err)
880 			break;
881 	}
882 
883 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
884 
885 	return err;
886 }
887 
888 static int shrink_boom(struct drm_i915_private *i915,
889 		       struct i915_address_space *vm,
890 		       u64 hole_start, u64 hole_end,
891 		       unsigned long end_time)
892 {
893 	unsigned int sizes[] = { SZ_2M, SZ_1G };
894 	struct drm_i915_gem_object *purge;
895 	struct drm_i915_gem_object *explode;
896 	int err;
897 	int i;
898 
899 	/*
900 	 * Catch the case which shrink_hole seems to miss. The setup here
901 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
902 	 * ensuring that all vma associated with the respective pd/pdp are
903 	 * unpinned at the time.
904 	 */
905 
906 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
907 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
908 		unsigned int size = sizes[i];
909 		struct i915_vma *vma;
910 
911 		purge = fake_dma_object(i915, size);
912 		if (IS_ERR(purge))
913 			return PTR_ERR(purge);
914 
915 		vma = i915_vma_instance(purge, vm, NULL);
916 		if (IS_ERR(vma)) {
917 			err = PTR_ERR(vma);
918 			goto err_purge;
919 		}
920 
921 		err = i915_vma_pin(vma, 0, 0, flags);
922 		if (err)
923 			goto err_purge;
924 
925 		/* Should now be ripe for purging */
926 		i915_vma_unpin(vma);
927 
928 		explode = fake_dma_object(i915, size);
929 		if (IS_ERR(explode)) {
930 			err = PTR_ERR(explode);
931 			goto err_purge;
932 		}
933 
934 		vm->fault_attr.probability = 100;
935 		vm->fault_attr.interval = 1;
936 		atomic_set(&vm->fault_attr.times, -1);
937 
938 		vma = i915_vma_instance(explode, vm, NULL);
939 		if (IS_ERR(vma)) {
940 			err = PTR_ERR(vma);
941 			goto err_explode;
942 		}
943 
944 		err = i915_vma_pin(vma, 0, 0, flags | size);
945 		if (err)
946 			goto err_explode;
947 
948 		i915_vma_unpin(vma);
949 
950 		i915_gem_object_put(purge);
951 		i915_gem_object_put(explode);
952 
953 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
954 	}
955 
956 	return 0;
957 
958 err_explode:
959 	i915_gem_object_put(explode);
960 err_purge:
961 	i915_gem_object_put(purge);
962 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
963 	return err;
964 }
965 
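/*
 * Run one of the hole testers over a freshly created full ppgtt, covering
 * its entire address range, under struct_mutex and on behalf of a mock
 * client file so that the ppgtt can be torn down normally afterwards.
 */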
966 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
967 			  int (*func)(struct drm_i915_private *i915,
968 				      struct i915_address_space *vm,
969 				      u64 hole_start, u64 hole_end,
970 				      unsigned long end_time))
971 {
972 	struct drm_file *file;
973 	struct i915_hw_ppgtt *ppgtt;
974 	IGT_TIMEOUT(end_time);
975 	int err;
976 
977 	if (!USES_FULL_PPGTT(dev_priv))
978 		return 0;
979 
980 	file = mock_file(dev_priv);
981 	if (IS_ERR(file))
982 		return PTR_ERR(file);
983 
984 	mutex_lock(&dev_priv->drm.struct_mutex);
985 	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
986 	if (IS_ERR(ppgtt)) {
987 		err = PTR_ERR(ppgtt);
988 		goto out_unlock;
989 	}
990 	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
991 	GEM_BUG_ON(ppgtt->base.closed);
992 
993 	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
994 
995 	i915_ppgtt_close(&ppgtt->base);
996 	i915_ppgtt_put(ppgtt);
997 out_unlock:
998 	mutex_unlock(&dev_priv->drm.struct_mutex);
999 
1000 	mock_file_free(dev_priv, file);
1001 	return err;
1002 }
1003 
1004 static int igt_ppgtt_fill(void *arg)
1005 {
1006 	return exercise_ppgtt(arg, fill_hole);
1007 }
1008 
1009 static int igt_ppgtt_walk(void *arg)
1010 {
1011 	return exercise_ppgtt(arg, walk_hole);
1012 }
1013 
1014 static int igt_ppgtt_pot(void *arg)
1015 {
1016 	return exercise_ppgtt(arg, pot_hole);
1017 }
1018 
1019 static int igt_ppgtt_drunk(void *arg)
1020 {
1021 	return exercise_ppgtt(arg, drunk_hole);
1022 }
1023 
1024 static int igt_ppgtt_lowlevel(void *arg)
1025 {
1026 	return exercise_ppgtt(arg, lowlevel_hole);
1027 }
1028 
1029 static int igt_ppgtt_shrink(void *arg)
1030 {
1031 	return exercise_ppgtt(arg, shrink_hole);
1032 }
1033 
1034 static int igt_ppgtt_shrink_boom(void *arg)
1035 {
1036 	return exercise_ppgtt(arg, shrink_boom);
1037 }
1038 
1039 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1040 {
1041 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1042 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1043 
1044 	if (a->start < b->start)
1045 		return -1;
1046 	else
1047 		return 1;
1048 }
1049 
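/*
 * Apply the hole tester to every hole currently present in the live GGTT.
 * The holes are visited in address order and the walk restarts from
 * scratch after each one, since running func is expected to have
 * manipulated the drm_mm and invalidated the hole list iterator.
 */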
1050 static int exercise_ggtt(struct drm_i915_private *i915,
1051 			 int (*func)(struct drm_i915_private *i915,
1052 				     struct i915_address_space *vm,
1053 				     u64 hole_start, u64 hole_end,
1054 				     unsigned long end_time))
1055 {
1056 	struct i915_ggtt *ggtt = &i915->ggtt;
1057 	u64 hole_start, hole_end, last = 0;
1058 	struct drm_mm_node *node;
1059 	IGT_TIMEOUT(end_time);
1060 	int err = 0;
1061 
1062 	mutex_lock(&i915->drm.struct_mutex);
1063 restart:
1064 	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
1065 	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
1066 		if (hole_start < last)
1067 			continue;
1068 
1069 		if (ggtt->base.mm.color_adjust)
1070 			ggtt->base.mm.color_adjust(node, 0,
1071 						   &hole_start, &hole_end);
1072 		if (hole_start >= hole_end)
1073 			continue;
1074 
1075 		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
1076 		if (err)
1077 			break;
1078 
1079 		/* As we have manipulated the drm_mm, the list may be corrupt */
1080 		last = hole_end;
1081 		goto restart;
1082 	}
1083 	mutex_unlock(&i915->drm.struct_mutex);
1084 
1085 	return err;
1086 }
1087 
1088 static int igt_ggtt_fill(void *arg)
1089 {
1090 	return exercise_ggtt(arg, fill_hole);
1091 }
1092 
1093 static int igt_ggtt_walk(void *arg)
1094 {
1095 	return exercise_ggtt(arg, walk_hole);
1096 }
1097 
1098 static int igt_ggtt_pot(void *arg)
1099 {
1100 	return exercise_ggtt(arg, pot_hole);
1101 }
1102 
1103 static int igt_ggtt_drunk(void *arg)
1104 {
1105 	return exercise_ggtt(arg, drunk_hole);
1106 }
1107 
1108 static int igt_ggtt_lowlevel(void *arg)
1109 {
1110 	return exercise_ggtt(arg, lowlevel_hole);
1111 }
1112 
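/*
 * Check ggtt->base.insert_page() by pointing a range of GGTT PTEs at one
 * and the same page, then writing dword n through the nth mapping (picked
 * in a random order via the mappable aperture) and reading each value back
 * in a different random order to confirm every offset maps our page.
 */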
1113 static int igt_ggtt_page(void *arg)
1114 {
1115 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1116 	I915_RND_STATE(prng);
1117 	struct drm_i915_private *i915 = arg;
1118 	struct i915_ggtt *ggtt = &i915->ggtt;
1119 	struct drm_i915_gem_object *obj;
1120 	struct drm_mm_node tmp;
1121 	unsigned int *order, n;
1122 	int err;
1123 
1124 	mutex_lock(&i915->drm.struct_mutex);
1125 
1126 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1127 	if (IS_ERR(obj)) {
1128 		err = PTR_ERR(obj);
1129 		goto out_unlock;
1130 	}
1131 
1132 	err = i915_gem_object_pin_pages(obj);
1133 	if (err)
1134 		goto out_free;
1135 
1136 	memset(&tmp, 0, sizeof(tmp));
1137 	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
1138 					  count * PAGE_SIZE, 0,
1139 					  I915_COLOR_UNEVICTABLE,
1140 					  0, ggtt->mappable_end,
1141 					  DRM_MM_INSERT_LOW);
1142 	if (err)
1143 		goto out_unpin;
1144 
1145 	intel_runtime_pm_get(i915);
1146 
1147 	for (n = 0; n < count; n++) {
1148 		u64 offset = tmp.start + n * PAGE_SIZE;
1149 
1150 		ggtt->base.insert_page(&ggtt->base,
1151 				       i915_gem_object_get_dma_address(obj, 0),
1152 				       offset, I915_CACHE_NONE, 0);
1153 	}
1154 
1155 	order = i915_random_order(count, &prng);
1156 	if (!order) {
1157 		err = -ENOMEM;
1158 		goto out_remove;
1159 	}
1160 
1161 	for (n = 0; n < count; n++) {
1162 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1163 		u32 __iomem *vaddr;
1164 
1165 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1166 		iowrite32(n, vaddr + n);
1167 		io_mapping_unmap_atomic(vaddr);
1168 	}
1169 	i915_gem_flush_ggtt_writes(i915);
1170 
1171 	i915_random_reorder(order, count, &prng);
1172 	for (n = 0; n < count; n++) {
1173 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1174 		u32 __iomem *vaddr;
1175 		u32 val;
1176 
1177 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1178 		val = ioread32(vaddr + n);
1179 		io_mapping_unmap_atomic(vaddr);
1180 
1181 		if (val != n) {
1182 			pr_err("insert page failed: found %d, expected %d\n",
1183 			       val, n);
1184 			err = -EINVAL;
1185 			break;
1186 		}
1187 	}
1188 
1189 	kfree(order);
1190 out_remove:
1191 	ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
1192 	intel_runtime_pm_put(i915);
1193 	drm_mm_remove_node(&tmp);
1194 out_unpin:
1195 	i915_gem_object_unpin_pages(obj);
1196 out_free:
1197 	i915_gem_object_put(obj);
1198 out_unlock:
1199 	mutex_unlock(&i915->drm.struct_mutex);
1200 	return err;
1201 }
1202 
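/*
 * Fake the bookkeeping that binding normally performs, so that the
 * eviction code treats the vma as bound: bump the object's bind count,
 * keep its pages pinned, adopt them as the vma's pages and move the vma
 * onto the vm's inactive list.
 */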
1203 static void track_vma_bind(struct i915_vma *vma)
1204 {
1205 	struct drm_i915_gem_object *obj = vma->obj;
1206 
1207 	obj->bind_count++; /* track for eviction later */
1208 	__i915_gem_object_pin_pages(obj);
1209 
1210 	vma->pages = obj->mm.pages;
1211 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1212 }
1213 
1214 static int exercise_mock(struct drm_i915_private *i915,
1215 			 int (*func)(struct drm_i915_private *i915,
1216 				     struct i915_address_space *vm,
1217 				     u64 hole_start, u64 hole_end,
1218 				     unsigned long end_time))
1219 {
1220 	struct i915_gem_context *ctx;
1221 	struct i915_hw_ppgtt *ppgtt;
1222 	IGT_TIMEOUT(end_time);
1223 	int err;
1224 
1225 	ctx = mock_context(i915, "mock");
1226 	if (!ctx)
1227 		return -ENOMEM;
1228 
1229 	ppgtt = ctx->ppgtt;
1230 	GEM_BUG_ON(!ppgtt);
1231 
1232 	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
1233 
1234 	mock_context_close(ctx);
1235 	return err;
1236 }
1237 
1238 static int igt_mock_fill(void *arg)
1239 {
1240 	return exercise_mock(arg, fill_hole);
1241 }
1242 
1243 static int igt_mock_walk(void *arg)
1244 {
1245 	return exercise_mock(arg, walk_hole);
1246 }
1247 
1248 static int igt_mock_pot(void *arg)
1249 {
1250 	return exercise_mock(arg, pot_hole);
1251 }
1252 
1253 static int igt_mock_drunk(void *arg)
1254 {
1255 	return exercise_mock(arg, drunk_hole);
1256 }
1257 
1258 static int igt_gtt_reserve(void *arg)
1259 {
1260 	struct drm_i915_private *i915 = arg;
1261 	struct drm_i915_gem_object *obj, *on;
1262 	LIST_HEAD(objects);
1263 	u64 total;
1264 	int err = -ENODEV;
1265 
1266 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1267 	 * for the node, and evicts if it has to. So our test checks that
1268 	 * it can give us the requested space and prevent overlaps.
1269 	 */
1270 
1271 	/* Start by filling the GGTT */
1272 	for (total = 0;
1273 	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1274 	     total += 2*I915_GTT_PAGE_SIZE) {
1275 		struct i915_vma *vma;
1276 
1277 		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1278 		if (IS_ERR(obj)) {
1279 			err = PTR_ERR(obj);
1280 			goto out;
1281 		}
1282 
1283 		err = i915_gem_object_pin_pages(obj);
1284 		if (err) {
1285 			i915_gem_object_put(obj);
1286 			goto out;
1287 		}
1288 
1289 		list_add(&obj->st_link, &objects);
1290 
1291 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1292 		if (IS_ERR(vma)) {
1293 			err = PTR_ERR(vma);
1294 			goto out;
1295 		}
1296 
1297 		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
1298 					   obj->base.size,
1299 					   total,
1300 					   obj->cache_level,
1301 					   0);
1302 		if (err) {
1303 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1304 			       total, i915->ggtt.base.total, err);
1305 			goto out;
1306 		}
1307 		track_vma_bind(vma);
1308 
1309 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1310 		if (vma->node.start != total ||
1311 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1312 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1313 			       vma->node.start, vma->node.size,
1314 			       total, 2*I915_GTT_PAGE_SIZE);
1315 			err = -EINVAL;
1316 			goto out;
1317 		}
1318 	}
1319 
1320 	/* Now we start forcing evictions */
1321 	for (total = I915_GTT_PAGE_SIZE;
1322 	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1323 	     total += 2*I915_GTT_PAGE_SIZE) {
1324 		struct i915_vma *vma;
1325 
1326 		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1327 		if (IS_ERR(obj)) {
1328 			err = PTR_ERR(obj);
1329 			goto out;
1330 		}
1331 
1332 		err = i915_gem_object_pin_pages(obj);
1333 		if (err) {
1334 			i915_gem_object_put(obj);
1335 			goto out;
1336 		}
1337 
1338 		list_add(&obj->st_link, &objects);
1339 
1340 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1341 		if (IS_ERR(vma)) {
1342 			err = PTR_ERR(vma);
1343 			goto out;
1344 		}
1345 
1346 		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
1347 					   obj->base.size,
1348 					   total,
1349 					   obj->cache_level,
1350 					   0);
1351 		if (err) {
1352 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1353 			       total, i915->ggtt.base.total, err);
1354 			goto out;
1355 		}
1356 		track_vma_bind(vma);
1357 
1358 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1359 		if (vma->node.start != total ||
1360 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1361 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1362 			       vma->node.start, vma->node.size,
1363 			       total, 2*I915_GTT_PAGE_SIZE);
1364 			err = -EINVAL;
1365 			goto out;
1366 		}
1367 	}
1368 
1369 	/* And then try at random */
1370 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1371 		struct i915_vma *vma;
1372 		u64 offset;
1373 
1374 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1375 		if (IS_ERR(vma)) {
1376 			err = PTR_ERR(vma);
1377 			goto out;
1378 		}
1379 
1380 		err = i915_vma_unbind(vma);
1381 		if (err) {
1382 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1383 			goto out;
1384 		}
1385 
1386 		offset = random_offset(0, i915->ggtt.base.total,
1387 				       2*I915_GTT_PAGE_SIZE,
1388 				       I915_GTT_MIN_ALIGNMENT);
1389 
1390 		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
1391 					   obj->base.size,
1392 					   offset,
1393 					   obj->cache_level,
1394 					   0);
1395 		if (err) {
1396 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1397 			       total, i915->ggtt.base.total, err);
1398 			goto out;
1399 		}
1400 		track_vma_bind(vma);
1401 
1402 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1403 		if (vma->node.start != offset ||
1404 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1405 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1406 			       vma->node.start, vma->node.size,
1407 			       offset, 2*I915_GTT_PAGE_SIZE);
1408 			err = -EINVAL;
1409 			goto out;
1410 		}
1411 	}
1412 
1413 out:
1414 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1415 		i915_gem_object_unpin_pages(obj);
1416 		i915_gem_object_put(obj);
1417 	}
1418 	return err;
1419 }
1420 
1421 static int igt_gtt_insert(void *arg)
1422 {
1423 	struct drm_i915_private *i915 = arg;
1424 	struct drm_i915_gem_object *obj, *on;
1425 	struct drm_mm_node tmp = {};
1426 	const struct invalid_insert {
1427 		u64 size;
1428 		u64 alignment;
1429 		u64 start, end;
1430 	} invalid_insert[] = {
1431 		{
1432 			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
1433 			0, i915->ggtt.base.total,
1434 		},
1435 		{
1436 			2*I915_GTT_PAGE_SIZE, 0,
1437 			0, I915_GTT_PAGE_SIZE,
1438 		},
1439 		{
1440 			-(u64)I915_GTT_PAGE_SIZE, 0,
1441 			0, 4*I915_GTT_PAGE_SIZE,
1442 		},
1443 		{
1444 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1445 			0, 4*I915_GTT_PAGE_SIZE,
1446 		},
1447 		{
1448 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1449 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1450 		},
1451 		{}
1452 	}, *ii;
1453 	LIST_HEAD(objects);
1454 	u64 total;
1455 	int err = -ENODEV;
1456 
1457 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1458 	 * for the node, evicting if required.
1459 	 */
1460 
1461 	/* Check a couple of obviously invalid requests */
1462 	for (ii = invalid_insert; ii->size; ii++) {
1463 		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
1464 					  ii->size, ii->alignment,
1465 					  I915_COLOR_UNEVICTABLE,
1466 					  ii->start, ii->end,
1467 					  0);
1468 		if (err != -ENOSPC) {
1469 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1470 			       ii->size, ii->alignment, ii->start, ii->end,
1471 			       err);
1472 			return -EINVAL;
1473 		}
1474 	}
1475 
1476 	/* Start by filling the GGTT */
1477 	for (total = 0;
1478 	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1479 	     total += I915_GTT_PAGE_SIZE) {
1480 		struct i915_vma *vma;
1481 
1482 		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
1483 		if (IS_ERR(obj)) {
1484 			err = PTR_ERR(obj);
1485 			goto out;
1486 		}
1487 
1488 		err = i915_gem_object_pin_pages(obj);
1489 		if (err) {
1490 			i915_gem_object_put(obj);
1491 			goto out;
1492 		}
1493 
1494 		list_add(&obj->st_link, &objects);
1495 
1496 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1497 		if (IS_ERR(vma)) {
1498 			err = PTR_ERR(vma);
1499 			goto out;
1500 		}
1501 
1502 		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1503 					  obj->base.size, 0, obj->cache_level,
1504 					  0, i915->ggtt.base.total,
1505 					  0);
1506 		if (err == -ENOSPC) {
1507 			/* maxed out the GGTT space */
1508 			i915_gem_object_put(obj);
1509 			break;
1510 		}
1511 		if (err) {
1512 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1513 			       total, i915->ggtt.base.total, err);
1514 			goto out;
1515 		}
1516 		track_vma_bind(vma);
1517 		__i915_vma_pin(vma);
1518 
1519 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1520 	}
1521 
1522 	list_for_each_entry(obj, &objects, st_link) {
1523 		struct i915_vma *vma;
1524 
1525 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1526 		if (IS_ERR(vma)) {
1527 			err = PTR_ERR(vma);
1528 			goto out;
1529 		}
1530 
1531 		if (!drm_mm_node_allocated(&vma->node)) {
1532 			pr_err("VMA was unexpectedly evicted!\n");
1533 			err = -EINVAL;
1534 			goto out;
1535 		}
1536 
1537 		__i915_vma_unpin(vma);
1538 	}
1539 
1540 	/* If we then reinsert, we should find the same hole */
1541 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1542 		struct i915_vma *vma;
1543 		u64 offset;
1544 
1545 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1546 		if (IS_ERR(vma)) {
1547 			err = PTR_ERR(vma);
1548 			goto out;
1549 		}
1550 
1551 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1552 		offset = vma->node.start;
1553 
1554 		err = i915_vma_unbind(vma);
1555 		if (err) {
1556 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1557 			goto out;
1558 		}
1559 
1560 		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1561 					  obj->base.size, 0, obj->cache_level,
1562 					  0, i915->ggtt.base.total,
1563 					  0);
1564 		if (err) {
1565 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1566 			       total, i915->ggtt.base.total, err);
1567 			goto out;
1568 		}
1569 		track_vma_bind(vma);
1570 
1571 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1572 		if (vma->node.start != offset) {
1573 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1574 			       offset, vma->node.start);
1575 			err = -EINVAL;
1576 			goto out;
1577 		}
1578 	}
1579 
1580 	/* And then force evictions */
1581 	for (total = 0;
1582 	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1583 	     total += 2*I915_GTT_PAGE_SIZE) {
1584 		struct i915_vma *vma;
1585 
1586 		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
1587 		if (IS_ERR(obj)) {
1588 			err = PTR_ERR(obj);
1589 			goto out;
1590 		}
1591 
1592 		err = i915_gem_object_pin_pages(obj);
1593 		if (err) {
1594 			i915_gem_object_put(obj);
1595 			goto out;
1596 		}
1597 
1598 		list_add(&obj->st_link, &objects);
1599 
1600 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1601 		if (IS_ERR(vma)) {
1602 			err = PTR_ERR(vma);
1603 			goto out;
1604 		}
1605 
1606 		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1607 					  obj->base.size, 0, obj->cache_level,
1608 					  0, i915->ggtt.base.total,
1609 					  0);
1610 		if (err) {
1611 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1612 			       total, i915->ggtt.base.total, err);
1613 			goto out;
1614 		}
1615 		track_vma_bind(vma);
1616 
1617 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1618 	}
1619 
1620 out:
1621 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1622 		i915_gem_object_unpin_pages(obj);
1623 		i915_gem_object_put(obj);
1624 	}
1625 	return err;
1626 }
1627 
1628 int i915_gem_gtt_mock_selftests(void)
1629 {
1630 	static const struct i915_subtest tests[] = {
1631 		SUBTEST(igt_mock_drunk),
1632 		SUBTEST(igt_mock_walk),
1633 		SUBTEST(igt_mock_pot),
1634 		SUBTEST(igt_mock_fill),
1635 		SUBTEST(igt_gtt_reserve),
1636 		SUBTEST(igt_gtt_insert),
1637 	};
1638 	struct drm_i915_private *i915;
1639 	int err;
1640 
1641 	i915 = mock_gem_device();
1642 	if (!i915)
1643 		return -ENOMEM;
1644 
1645 	mutex_lock(&i915->drm.struct_mutex);
1646 	err = i915_subtests(tests, i915);
1647 	mutex_unlock(&i915->drm.struct_mutex);
1648 
1649 	drm_dev_unref(&i915->drm);
1650 	return err;
1651 }
1652 
1653 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1654 {
1655 	static const struct i915_subtest tests[] = {
1656 		SUBTEST(igt_ppgtt_alloc),
1657 		SUBTEST(igt_ppgtt_lowlevel),
1658 		SUBTEST(igt_ppgtt_drunk),
1659 		SUBTEST(igt_ppgtt_walk),
1660 		SUBTEST(igt_ppgtt_pot),
1661 		SUBTEST(igt_ppgtt_fill),
1662 		SUBTEST(igt_ppgtt_shrink),
1663 		SUBTEST(igt_ppgtt_shrink_boom),
1664 		SUBTEST(igt_ggtt_lowlevel),
1665 		SUBTEST(igt_ggtt_drunk),
1666 		SUBTEST(igt_ggtt_walk),
1667 		SUBTEST(igt_ggtt_pot),
1668 		SUBTEST(igt_ggtt_fill),
1669 		SUBTEST(igt_ggtt_page),
1670 	};
1671 
1672 	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
1673 
1674 	return i915_subtests(tests, i915);
1675 }
1676