xref: /openbmc/linux/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c (revision 530e7a660fb795452357b36cce26b839a9a187a9)
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "../i915_selftest.h"
29 #include "i915_random.h"
30 
31 #include "mock_context.h"
32 #include "mock_drm.h"
33 #include "mock_gem_device.h"
34 
35 static void fake_free_pages(struct drm_i915_gem_object *obj,
36 			    struct sg_table *pages)
37 {
38 	sg_free_table(pages);
39 	kfree(pages);
40 }
41 
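/*
 * Fake backing store for the test objects: build a scatterlist with one
 * entry per 2GiB chunk, every entry pointing at the same bogus PFN. The CPU
 * never touches these "pages"; only the dma addresses are written into the
 * GTT, which lets the tests create arbitrarily large objects without
 * allocating any real memory.
 */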
42 static int fake_get_pages(struct drm_i915_gem_object *obj)
43 {
44 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
45 #define PFN_BIAS 0x1000
46 	struct sg_table *pages;
47 	struct scatterlist *sg;
48 	unsigned int sg_page_sizes;
49 	typeof(obj->base.size) rem;
50 
51 	pages = kmalloc(sizeof(*pages), GFP);
52 	if (!pages)
53 		return -ENOMEM;
54 
55 	rem = round_up(obj->base.size, BIT(31)) >> 31;
56 	if (sg_alloc_table(pages, rem, GFP)) {
57 		kfree(pages);
58 		return -ENOMEM;
59 	}
60 
61 	sg_page_sizes = 0;
62 	rem = obj->base.size;
63 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
64 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
65 
66 		GEM_BUG_ON(!len);
67 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
68 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
69 		sg_dma_len(sg) = len;
70 		sg_page_sizes |= len;
71 
72 		rem -= len;
73 	}
74 	GEM_BUG_ON(rem);
75 
76 	obj->mm.madv = I915_MADV_DONTNEED;
77 
78 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
79 
80 	return 0;
81 #undef GFP
82 }
83 
84 static void fake_put_pages(struct drm_i915_gem_object *obj,
85 			   struct sg_table *pages)
86 {
87 	fake_free_pages(obj, pages);
88 	obj->mm.dirty = false;
89 	obj->mm.madv = I915_MADV_WILLNEED;
90 }
91 
92 static const struct drm_i915_gem_object_ops fake_ops = {
93 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
94 	.get_pages = fake_get_pages,
95 	.put_pages = fake_put_pages,
96 };
97 
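/*
 * Create a GEM object of the requested size backed by fake_ops, i.e. by the
 * fake scatterlist above rather than by real shmemfs pages.
 */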
98 static struct drm_i915_gem_object *
99 fake_dma_object(struct drm_i915_private *i915, u64 size)
100 {
101 	struct drm_i915_gem_object *obj;
102 
103 	GEM_BUG_ON(!size);
104 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
105 
106 	if (overflows_type(size, obj->base.size))
107 		return ERR_PTR(-E2BIG);
108 
109 	obj = i915_gem_object_alloc(i915);
110 	if (!obj)
111 		goto err;
112 
113 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
114 	i915_gem_object_init(obj, &fake_ops);
115 
116 	obj->write_domain = I915_GEM_DOMAIN_CPU;
117 	obj->read_domains = I915_GEM_DOMAIN_CPU;
118 	obj->cache_level = I915_CACHE_NONE;
119 
120 	/* Preallocate the "backing storage" */
121 	if (i915_gem_object_pin_pages(obj))
122 		goto err_obj;
123 
124 	i915_gem_object_unpin_pages(obj);
125 	return obj;
126 
127 err_obj:
128 	i915_gem_object_put(obj);
129 err:
130 	return ERR_PTR(-ENOMEM);
131 }
132 
133 static int igt_ppgtt_alloc(void *arg)
134 {
135 	struct drm_i915_private *dev_priv = arg;
136 	struct i915_hw_ppgtt *ppgtt;
137 	u64 size, last;
138 	int err = 0;
139 
140 	/* Allocate a ppgtt and try to fill the entire range */
141 
142 	if (!USES_PPGTT(dev_priv))
143 		return 0;
144 
145 	mutex_lock(&dev_priv->drm.struct_mutex);
146 	ppgtt = __hw_ppgtt_create(dev_priv);
147 	if (IS_ERR(ppgtt)) {
148 		err = PTR_ERR(ppgtt);
149 		goto err_unlock;
150 	}
151 
152 	if (!ppgtt->vm.allocate_va_range)
153 		goto err_ppgtt_cleanup;
154 
155 	/* Check we can allocate the entire range */
156 	for (size = 4096;
157 	     size <= ppgtt->vm.total;
158 	     size <<= 2) {
159 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
160 		if (err) {
161 			if (err == -ENOMEM) {
162 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
163 					size, ilog2(size));
164 				err = 0; /* virtual space too large! */
165 			}
166 			goto err_ppgtt_cleanup;
167 		}
168 
169 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
170 	}
171 
172 	/* Check we can incrementally allocate the entire range */
173 	for (last = 0, size = 4096;
174 	     size <= ppgtt->vm.total;
175 	     last = size, size <<= 2) {
176 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
177 						  last, size - last);
178 		if (err) {
179 			if (err == -ENOMEM) {
180 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
181 					last, size - last, ilog2(size));
182 				err = 0; /* virtual space too large! */
183 			}
184 			goto err_ppgtt_cleanup;
185 		}
186 	}
187 
188 err_ppgtt_cleanup:
189 	ppgtt->vm.cleanup(&ppgtt->vm);
190 	kfree(ppgtt);
191 err_unlock:
192 	mutex_unlock(&dev_priv->drm.struct_mutex);
193 	return err;
194 }
195 
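/*
 * Exercise the low-level page-table hooks (allocate_va_range, insert_entries
 * and clear_range) directly, using a stack-allocated mock vma instead of the
 * full VMA machinery. The hole is carved into power-of-two slots that are
 * populated and then cleared again in pseudo-random order.
 */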
196 static int lowlevel_hole(struct drm_i915_private *i915,
197 			 struct i915_address_space *vm,
198 			 u64 hole_start, u64 hole_end,
199 			 unsigned long end_time)
200 {
201 	I915_RND_STATE(seed_prng);
202 	unsigned int size;
203 	struct i915_vma mock_vma;
204 
205 	memset(&mock_vma, 0, sizeof(struct i915_vma));
206 
207 	/* Keep creating larger objects until one cannot fit into the hole */
208 	for (size = 12; (hole_end - hole_start) >> size; size++) {
209 		I915_RND_SUBSTATE(prng, seed_prng);
210 		struct drm_i915_gem_object *obj;
211 		unsigned int *order, count, n;
212 		u64 hole_size;
213 
214 		hole_size = (hole_end - hole_start) >> size;
215 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
216 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
217 		count = hole_size >> 1;
218 		if (!count) {
219 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %llu\n",
220 				 __func__, hole_start, hole_end, size, hole_size);
221 			break;
222 		}
223 
224 		do {
225 			order = i915_random_order(count, &prng);
226 			if (order)
227 				break;
228 		} while (count >>= 1);
229 		if (!count)
230 			return -ENOMEM;
231 		GEM_BUG_ON(!order);
232 
233 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
234 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
235 
236 		/* Ignore allocation failures (i.e. don't report them as
237 		 * a test failure) as we are purposefully allocating very
238 		 * large objects without checking that we have sufficient
239 		 * memory. We expect to hit -ENOMEM.
240 		 */
241 
242 		obj = fake_dma_object(i915, BIT_ULL(size));
243 		if (IS_ERR(obj)) {
244 			kfree(order);
245 			break;
246 		}
247 
248 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
249 
250 		if (i915_gem_object_pin_pages(obj)) {
251 			i915_gem_object_put(obj);
252 			kfree(order);
253 			break;
254 		}
255 
256 		for (n = 0; n < count; n++) {
257 			u64 addr = hole_start + order[n] * BIT_ULL(size);
258 
259 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
260 
261 			if (igt_timeout(end_time,
262 					"%s timed out before %d/%d\n",
263 					__func__, n, count)) {
264 				hole_end = hole_start; /* quit */
265 				break;
266 			}
267 
268 			if (vm->allocate_va_range &&
269 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
270 				break;
271 
272 			mock_vma.pages = obj->mm.pages;
273 			mock_vma.node.size = BIT_ULL(size);
274 			mock_vma.node.start = addr;
275 
276 			intel_runtime_pm_get(i915);
277 			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
278 			intel_runtime_pm_put(i915);
279 		}
280 		count = n;
281 
282 		i915_random_reorder(order, count, &prng);
283 		for (n = 0; n < count; n++) {
284 			u64 addr = hole_start + order[n] * BIT_ULL(size);
285 
286 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
287 			vm->clear_range(vm, addr, BIT_ULL(size));
288 		}
289 
290 		i915_gem_object_unpin_pages(obj);
291 		i915_gem_object_put(obj);
292 
293 		kfree(order);
294 	}
295 
296 	return 0;
297 }
298 
299 static void close_object_list(struct list_head *objects,
300 			      struct i915_address_space *vm)
301 {
302 	struct drm_i915_gem_object *obj, *on;
303 	int ignored;
304 
305 	list_for_each_entry_safe(obj, on, objects, st_link) {
306 		struct i915_vma *vma;
307 
308 		vma = i915_vma_instance(obj, vm, NULL);
309 		if (!IS_ERR(vma))
310 			ignored = i915_vma_unbind(vma);
311 		/* Only ppgtt vma may be closed before the object is freed */
312 		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
313 			i915_vma_close(vma);
314 
315 		list_del(&obj->st_link);
316 		i915_gem_object_put(obj);
317 	}
318 }
319 
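/*
 * Fill the hole with an ever-growing list of objects (sizes stepping through
 * the powers of successive primes) bound at fixed offsets working inwards
 * from either edge. Each phase pins the whole list, verifies that every node
 * landed where requested, then unbinds it again, walking the list both
 * forwards and in reverse.
 */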
320 static int fill_hole(struct drm_i915_private *i915,
321 		     struct i915_address_space *vm,
322 		     u64 hole_start, u64 hole_end,
323 		     unsigned long end_time)
324 {
325 	const u64 hole_size = hole_end - hole_start;
326 	struct drm_i915_gem_object *obj;
327 	const unsigned long max_pages =
328 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
329 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
330 	unsigned long npages, prime, flags;
331 	struct i915_vma *vma;
332 	LIST_HEAD(objects);
333 	int err;
334 
335 	/* Try binding many VMA working inwards from either edge */
336 
337 	flags = PIN_OFFSET_FIXED | PIN_USER;
338 	if (i915_is_ggtt(vm))
339 		flags |= PIN_GLOBAL;
340 
341 	for_each_prime_number_from(prime, 2, max_step) {
342 		for (npages = 1; npages <= max_pages; npages *= prime) {
343 			const u64 full_size = npages << PAGE_SHIFT;
344 			const struct {
345 				const char *name;
346 				u64 offset;
347 				int step;
348 			} phases[] = {
349 				{ "top-down", hole_end, -1, },
350 				{ "bottom-up", hole_start, 1, },
351 				{ }
352 			}, *p;
353 
354 			obj = fake_dma_object(i915, full_size);
355 			if (IS_ERR(obj))
356 				break;
357 
358 			list_add(&obj->st_link, &objects);
359 
360 			/* Align differing sized objects against the edges, and
361 			 * check we don't walk off into the void when binding
362 			 * them into the GTT.
363 			 */
364 			for (p = phases; p->name; p++) {
365 				u64 offset;
366 
367 				offset = p->offset;
368 				list_for_each_entry(obj, &objects, st_link) {
369 					vma = i915_vma_instance(obj, vm, NULL);
370 					if (IS_ERR(vma))
371 						continue;
372 
373 					if (p->step < 0) {
374 						if (offset < hole_start + obj->base.size)
375 							break;
376 						offset -= obj->base.size;
377 					}
378 
379 					err = i915_vma_pin(vma, 0, 0, offset | flags);
380 					if (err) {
381 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
382 						       __func__, p->name, err, npages, prime, offset);
383 						goto err;
384 					}
385 
386 					if (!drm_mm_node_allocated(&vma->node) ||
387 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
388 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
389 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
390 						       offset);
391 						err = -EINVAL;
392 						goto err;
393 					}
394 
395 					i915_vma_unpin(vma);
396 
397 					if (p->step > 0) {
398 						if (offset + obj->base.size > hole_end)
399 							break;
400 						offset += obj->base.size;
401 					}
402 				}
403 
404 				offset = p->offset;
405 				list_for_each_entry(obj, &objects, st_link) {
406 					vma = i915_vma_instance(obj, vm, NULL);
407 					if (IS_ERR(vma))
408 						continue;
409 
410 					if (p->step < 0) {
411 						if (offset < hole_start + obj->base.size)
412 							break;
413 						offset -= obj->base.size;
414 					}
415 
416 					if (!drm_mm_node_allocated(&vma->node) ||
417 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
418 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
419 						       __func__, p->name, vma->node.start, vma->node.size,
420 						       offset);
421 						err = -EINVAL;
422 						goto err;
423 					}
424 
425 					err = i915_vma_unbind(vma);
426 					if (err) {
427 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
428 						       __func__, p->name, vma->node.start, vma->node.size,
429 						       err);
430 						goto err;
431 					}
432 
433 					if (p->step > 0) {
434 						if (offset + obj->base.size > hole_end)
435 							break;
436 						offset += obj->base.size;
437 					}
438 				}
439 
440 				offset = p->offset;
441 				list_for_each_entry_reverse(obj, &objects, st_link) {
442 					vma = i915_vma_instance(obj, vm, NULL);
443 					if (IS_ERR(vma))
444 						continue;
445 
446 					if (p->step < 0) {
447 						if (offset < hole_start + obj->base.size)
448 							break;
449 						offset -= obj->base.size;
450 					}
451 
452 					err = i915_vma_pin(vma, 0, 0, offset | flags);
453 					if (err) {
454 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
455 						       __func__, p->name, err, npages, prime, offset);
456 						goto err;
457 					}
458 
459 					if (!drm_mm_node_allocated(&vma->node) ||
460 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
461 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
462 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
463 						       offset);
464 						err = -EINVAL;
465 						goto err;
466 					}
467 
468 					i915_vma_unpin(vma);
469 
470 					if (p->step > 0) {
471 						if (offset + obj->base.size > hole_end)
472 							break;
473 						offset += obj->base.size;
474 					}
475 				}
476 
477 				offset = p->offset;
478 				list_for_each_entry_reverse(obj, &objects, st_link) {
479 					vma = i915_vma_instance(obj, vm, NULL);
480 					if (IS_ERR(vma))
481 						continue;
482 
483 					if (p->step < 0) {
484 						if (offset < hole_start + obj->base.size)
485 							break;
486 						offset -= obj->base.size;
487 					}
488 
489 					if (!drm_mm_node_allocated(&vma->node) ||
490 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
491 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
492 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
493 						       offset);
494 						err = -EINVAL;
495 						goto err;
496 					}
497 
498 					err = i915_vma_unbind(vma);
499 					if (err) {
500 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
501 						       __func__, p->name, vma->node.start, vma->node.size,
502 						       err);
503 						goto err;
504 					}
505 
506 					if (p->step > 0) {
507 						if (offset + obj->base.size > hole_end)
508 							break;
509 						offset += obj->base.size;
510 					}
511 				}
512 			}
513 
514 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
515 					__func__, npages, prime)) {
516 				err = -EINTR;
517 				goto err;
518 			}
519 		}
520 
521 		close_object_list(&objects, vm);
522 	}
523 
524 	return 0;
525 
526 err:
527 	close_object_list(&objects, vm);
528 	return err;
529 }
530 
531 static int walk_hole(struct drm_i915_private *i915,
532 		     struct i915_address_space *vm,
533 		     u64 hole_start, u64 hole_end,
534 		     unsigned long end_time)
535 {
536 	const u64 hole_size = hole_end - hole_start;
537 	const unsigned long max_pages =
538 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
539 	unsigned long flags;
540 	u64 size;
541 
542 	/* Try binding a single VMA in different positions within the hole */
543 
544 	flags = PIN_OFFSET_FIXED | PIN_USER;
545 	if (i915_is_ggtt(vm))
546 		flags |= PIN_GLOBAL;
547 
548 	for_each_prime_number_from(size, 1, max_pages) {
549 		struct drm_i915_gem_object *obj;
550 		struct i915_vma *vma;
551 		u64 addr;
552 		int err = 0;
553 
554 		obj = fake_dma_object(i915, size << PAGE_SHIFT);
555 		if (IS_ERR(obj))
556 			break;
557 
558 		vma = i915_vma_instance(obj, vm, NULL);
559 		if (IS_ERR(vma)) {
560 			err = PTR_ERR(vma);
561 			goto err_put;
562 		}
563 
564 		for (addr = hole_start;
565 		     addr + obj->base.size < hole_end;
566 		     addr += obj->base.size) {
567 			err = i915_vma_pin(vma, 0, 0, addr | flags);
568 			if (err) {
569 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
570 				       __func__, addr, vma->size,
571 				       hole_start, hole_end, err);
572 				goto err_close;
573 			}
574 			i915_vma_unpin(vma);
575 
576 			if (!drm_mm_node_allocated(&vma->node) ||
577 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
578 				pr_err("%s incorrect at %llx + %llx\n",
579 				       __func__, addr, vma->size);
580 				err = -EINVAL;
581 				goto err_close;
582 			}
583 
584 			err = i915_vma_unbind(vma);
585 			if (err) {
586 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
587 				       __func__, addr, vma->size, err);
588 				goto err_close;
589 			}
590 
591 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
592 
593 			if (igt_timeout(end_time,
594 					"%s timed out at %llx\n",
595 					__func__, addr)) {
596 				err = -EINTR;
597 				goto err_close;
598 			}
599 		}
600 
601 err_close:
602 		if (!i915_vma_is_ggtt(vma))
603 			i915_vma_close(vma);
604 err_put:
605 		i915_gem_object_put(obj);
606 		if (err)
607 			return err;
608 	}
609 
610 	return 0;
611 }
612 
613 static int pot_hole(struct drm_i915_private *i915,
614 		    struct i915_address_space *vm,
615 		    u64 hole_start, u64 hole_end,
616 		    unsigned long end_time)
617 {
618 	struct drm_i915_gem_object *obj;
619 	struct i915_vma *vma;
620 	unsigned long flags;
621 	unsigned int pot;
622 	int err = 0;
623 
624 	flags = PIN_OFFSET_FIXED | PIN_USER;
625 	if (i915_is_ggtt(vm))
626 		flags |= PIN_GLOBAL;
627 
628 	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
629 	if (IS_ERR(obj))
630 		return PTR_ERR(obj);
631 
632 	vma = i915_vma_instance(obj, vm, NULL);
633 	if (IS_ERR(vma)) {
634 		err = PTR_ERR(vma);
635 		goto err_obj;
636 	}
637 
638 	/* Insert a pair of pages across every pot boundary within the hole */
639 	for (pot = fls64(hole_end - 1) - 1;
640 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
641 	     pot--) {
642 		u64 step = BIT_ULL(pot);
643 		u64 addr;
644 
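		/*
		 * Straddle each multiple of step with the two-page object:
		 * the first page sits just below the boundary and the second
		 * just above it.
		 */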
645 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
646 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
647 		     addr += step) {
648 			err = i915_vma_pin(vma, 0, 0, addr | flags);
649 			if (err) {
650 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
651 				       __func__,
652 				       addr,
653 				       hole_start, hole_end,
654 				       err);
655 				goto err;
656 			}
657 
658 			if (!drm_mm_node_allocated(&vma->node) ||
659 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
660 				pr_err("%s incorrect at %llx + %llx\n",
661 				       __func__, addr, vma->size);
662 				i915_vma_unpin(vma);
663 				err = i915_vma_unbind(vma);
664 				err = -EINVAL;
665 				goto err;
666 			}
667 
668 			i915_vma_unpin(vma);
669 			err = i915_vma_unbind(vma);
670 			GEM_BUG_ON(err);
671 		}
672 
673 		if (igt_timeout(end_time,
674 				"%s timed out after %d/%d\n",
675 				__func__, pot, fls64(hole_end - 1) - 1)) {
676 			err = -EINTR;
677 			goto err;
678 		}
679 	}
680 
681 err:
682 	if (!i915_vma_is_ggtt(vma))
683 		i915_vma_close(vma);
684 err_obj:
685 	i915_gem_object_put(obj);
686 	return err;
687 }
688 
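/*
 * Like lowlevel_hole, but driven through the full i915_vma_pin/unbind API:
 * for each power-of-two object size, bind a single object at pseudo-randomly
 * ordered fixed offsets throughout the hole and check that it lands exactly
 * where requested every time.
 */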
689 static int drunk_hole(struct drm_i915_private *i915,
690 		      struct i915_address_space *vm,
691 		      u64 hole_start, u64 hole_end,
692 		      unsigned long end_time)
693 {
694 	I915_RND_STATE(prng);
695 	unsigned int size;
696 	unsigned long flags;
697 
698 	flags = PIN_OFFSET_FIXED | PIN_USER;
699 	if (i915_is_ggtt(vm))
700 		flags |= PIN_GLOBAL;
701 
702 	/* Keep creating larger objects until one cannot fit into the hole */
703 	for (size = 12; (hole_end - hole_start) >> size; size++) {
704 		struct drm_i915_gem_object *obj;
705 		unsigned int *order, count, n;
706 		struct i915_vma *vma;
707 		u64 hole_size;
708 		int err = -ENODEV;
709 
710 		hole_size = (hole_end - hole_start) >> size;
711 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
712 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
713 		count = hole_size >> 1;
714 		if (!count) {
715 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %llu\n",
716 				 __func__, hole_start, hole_end, size, hole_size);
717 			break;
718 		}
719 
720 		do {
721 			order = i915_random_order(count, &prng);
722 			if (order)
723 				break;
724 		} while (count >>= 1);
725 		if (!count)
726 			return -ENOMEM;
727 		GEM_BUG_ON(!order);
728 
729 		/* Ignore allocation failures (i.e. don't report them as
730 		 * a test failure) as we are purposefully allocating very
731 		 * large objects without checking that we have sufficient
732 		 * memory. We expect to hit -ENOMEM.
733 		 */
734 
735 		obj = fake_dma_object(i915, BIT_ULL(size));
736 		if (IS_ERR(obj)) {
737 			kfree(order);
738 			break;
739 		}
740 
741 		vma = i915_vma_instance(obj, vm, NULL);
742 		if (IS_ERR(vma)) {
743 			err = PTR_ERR(vma);
744 			goto err_obj;
745 		}
746 
747 		GEM_BUG_ON(vma->size != BIT_ULL(size));
748 
749 		for (n = 0; n < count; n++) {
750 			u64 addr = hole_start + order[n] * BIT_ULL(size);
751 
752 			err = i915_vma_pin(vma, 0, 0, addr | flags);
753 			if (err) {
754 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
755 				       __func__,
756 				       addr, BIT_ULL(size),
757 				       hole_start, hole_end,
758 				       err);
759 				goto err;
760 			}
761 
762 			if (!drm_mm_node_allocated(&vma->node) ||
763 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
764 				pr_err("%s incorrect at %llx + %llx\n",
765 				       __func__, addr, BIT_ULL(size));
766 				i915_vma_unpin(vma);
767 				err = i915_vma_unbind(vma);
768 				err = -EINVAL;
769 				goto err;
770 			}
771 
772 			i915_vma_unpin(vma);
773 			err = i915_vma_unbind(vma);
774 			GEM_BUG_ON(err);
775 
776 			if (igt_timeout(end_time,
777 					"%s timed out after %d/%d\n",
778 					__func__, n, count)) {
779 				err = -EINTR;
780 				goto err;
781 			}
782 		}
783 
784 err:
785 		if (!i915_vma_is_ggtt(vma))
786 			i915_vma_close(vma);
787 err_obj:
788 		i915_gem_object_put(obj);
789 		kfree(order);
790 		if (err)
791 			return err;
792 	}
793 
794 	return 0;
795 }
796 
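/*
 * Fill the hole from the bottom up with progressively larger objects
 * (doubling each step, clamped to the space remaining). The caller arms
 * fault injection on the address space first, so allocations made while
 * building the page tables fail at injected points and the error unwind
 * paths get exercised.
 */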
797 static int __shrink_hole(struct drm_i915_private *i915,
798 			 struct i915_address_space *vm,
799 			 u64 hole_start, u64 hole_end,
800 			 unsigned long end_time)
801 {
802 	struct drm_i915_gem_object *obj;
803 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
804 	unsigned int order = 12;
805 	LIST_HEAD(objects);
806 	int err = 0;
807 	u64 addr;
808 
809 	/* Fill the hole from the bottom up with progressively larger objects */
810 	for (addr = hole_start; addr < hole_end; ) {
811 		struct i915_vma *vma;
812 		u64 size = BIT_ULL(order++);
813 
814 		size = min(size, hole_end - addr);
815 		obj = fake_dma_object(i915, size);
816 		if (IS_ERR(obj)) {
817 			err = PTR_ERR(obj);
818 			break;
819 		}
820 
821 		list_add(&obj->st_link, &objects);
822 
823 		vma = i915_vma_instance(obj, vm, NULL);
824 		if (IS_ERR(vma)) {
825 			err = PTR_ERR(vma);
826 			break;
827 		}
828 
829 		GEM_BUG_ON(vma->size != size);
830 
831 		err = i915_vma_pin(vma, 0, 0, addr | flags);
832 		if (err) {
833 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
834 			       __func__, addr, size, hole_start, hole_end, err);
835 			break;
836 		}
837 
838 		if (!drm_mm_node_allocated(&vma->node) ||
839 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
840 			pr_err("%s incorrect at %llx + %llx\n",
841 			       __func__, addr, size);
842 			i915_vma_unpin(vma);
843 			err = i915_vma_unbind(vma);
844 			err = -EINVAL;
845 			break;
846 		}
847 
848 		i915_vma_unpin(vma);
849 		addr += size;
850 
851 		if (igt_timeout(end_time,
852 				"%s timed out at offset %llx [%llx - %llx]\n",
853 				__func__, addr, hole_start, hole_end)) {
854 			err = -EINTR;
855 			break;
856 		}
857 	}
858 
859 	close_object_list(&objects, vm);
860 	return err;
861 }
862 
863 static int shrink_hole(struct drm_i915_private *i915,
864 		       struct i915_address_space *vm,
865 		       u64 hole_start, u64 hole_end,
866 		       unsigned long end_time)
867 {
868 	unsigned long prime;
869 	int err;
870 
871 	vm->fault_attr.probability = 999;
872 	atomic_set(&vm->fault_attr.times, -1);
873 
874 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
875 		vm->fault_attr.interval = prime;
876 		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
877 		if (err)
878 			break;
879 	}
880 
881 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
882 
883 	return err;
884 }
885 
886 static int shrink_boom(struct drm_i915_private *i915,
887 		       struct i915_address_space *vm,
888 		       u64 hole_start, u64 hole_end,
889 		       unsigned long end_time)
890 {
891 	unsigned int sizes[] = { SZ_2M, SZ_1G };
892 	struct drm_i915_gem_object *purge;
893 	struct drm_i915_gem_object *explode;
894 	int err;
895 	int i;
896 
897 	/*
898 	 * Catch the case which shrink_hole seems to miss. The setup here
899 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
900 	 * ensuring that all vma associated with the respective pd/pdp are
901 	 * unpinned at the time.
902 	 */
903 
904 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
905 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
906 		unsigned int size = sizes[i];
907 		struct i915_vma *vma;
908 
909 		purge = fake_dma_object(i915, size);
910 		if (IS_ERR(purge))
911 			return PTR_ERR(purge);
912 
913 		vma = i915_vma_instance(purge, vm, NULL);
914 		if (IS_ERR(vma)) {
915 			err = PTR_ERR(vma);
916 			goto err_purge;
917 		}
918 
919 		err = i915_vma_pin(vma, 0, 0, flags);
920 		if (err)
921 			goto err_purge;
922 
923 		/* Should now be ripe for purging */
924 		i915_vma_unpin(vma);
925 
926 		explode = fake_dma_object(i915, size);
927 		if (IS_ERR(explode)) {
928 			err = PTR_ERR(explode);
929 			goto err_purge;
930 		}
931 
932 		vm->fault_attr.probability = 100;
933 		vm->fault_attr.interval = 1;
934 		atomic_set(&vm->fault_attr.times, -1);
935 
936 		vma = i915_vma_instance(explode, vm, NULL);
937 		if (IS_ERR(vma)) {
938 			err = PTR_ERR(vma);
939 			goto err_explode;
940 		}
941 
942 		err = i915_vma_pin(vma, 0, 0, flags | size);
943 		if (err)
944 			goto err_explode;
945 
946 		i915_vma_unpin(vma);
947 
948 		i915_gem_object_put(purge);
949 		i915_gem_object_put(explode);
950 
951 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
952 	}
953 
954 	return 0;
955 
956 err_explode:
957 	i915_gem_object_put(explode);
958 err_purge:
959 	i915_gem_object_put(purge);
960 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
961 	return err;
962 }
963 
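/*
 * Create a fresh full ppgtt and run the given hole exerciser over its entire
 * address range, then tear everything down again.
 */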
964 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
965 			  int (*func)(struct drm_i915_private *i915,
966 				      struct i915_address_space *vm,
967 				      u64 hole_start, u64 hole_end,
968 				      unsigned long end_time))
969 {
970 	struct drm_file *file;
971 	struct i915_hw_ppgtt *ppgtt;
972 	IGT_TIMEOUT(end_time);
973 	int err;
974 
975 	if (!USES_FULL_PPGTT(dev_priv))
976 		return 0;
977 
978 	file = mock_file(dev_priv);
979 	if (IS_ERR(file))
980 		return PTR_ERR(file);
981 
982 	mutex_lock(&dev_priv->drm.struct_mutex);
983 	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
984 	if (IS_ERR(ppgtt)) {
985 		err = PTR_ERR(ppgtt);
986 		goto out_unlock;
987 	}
988 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
989 	GEM_BUG_ON(ppgtt->vm.closed);
990 
991 	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
992 
993 	i915_ppgtt_close(&ppgtt->vm);
994 	i915_ppgtt_put(ppgtt);
995 out_unlock:
996 	mutex_unlock(&dev_priv->drm.struct_mutex);
997 
998 	mock_file_free(dev_priv, file);
999 	return err;
1000 }
1001 
1002 static int igt_ppgtt_fill(void *arg)
1003 {
1004 	return exercise_ppgtt(arg, fill_hole);
1005 }
1006 
1007 static int igt_ppgtt_walk(void *arg)
1008 {
1009 	return exercise_ppgtt(arg, walk_hole);
1010 }
1011 
1012 static int igt_ppgtt_pot(void *arg)
1013 {
1014 	return exercise_ppgtt(arg, pot_hole);
1015 }
1016 
1017 static int igt_ppgtt_drunk(void *arg)
1018 {
1019 	return exercise_ppgtt(arg, drunk_hole);
1020 }
1021 
1022 static int igt_ppgtt_lowlevel(void *arg)
1023 {
1024 	return exercise_ppgtt(arg, lowlevel_hole);
1025 }
1026 
1027 static int igt_ppgtt_shrink(void *arg)
1028 {
1029 	return exercise_ppgtt(arg, shrink_hole);
1030 }
1031 
1032 static int igt_ppgtt_shrink_boom(void *arg)
1033 {
1034 	return exercise_ppgtt(arg, shrink_boom);
1035 }
1036 
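/* list_sort() comparator: order drm_mm holes by ascending start address */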
1037 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1038 {
1039 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1040 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1041 
1042 	if (a->start < b->start)
1043 		return -1;
1044 	else
1045 		return 1;
1046 }
1047 
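/*
 * Run the given exerciser over every hole currently left in the global GTT.
 * Holes are visited in order of ascending address; as the exerciser may
 * allocate and free nodes behind our back, the walk restarts from the top
 * after each hole and skips everything below the last address handled.
 */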
1048 static int exercise_ggtt(struct drm_i915_private *i915,
1049 			 int (*func)(struct drm_i915_private *i915,
1050 				     struct i915_address_space *vm,
1051 				     u64 hole_start, u64 hole_end,
1052 				     unsigned long end_time))
1053 {
1054 	struct i915_ggtt *ggtt = &i915->ggtt;
1055 	u64 hole_start, hole_end, last = 0;
1056 	struct drm_mm_node *node;
1057 	IGT_TIMEOUT(end_time);
1058 	int err = 0;
1059 
1060 	mutex_lock(&i915->drm.struct_mutex);
1061 restart:
1062 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1063 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1064 		if (hole_start < last)
1065 			continue;
1066 
1067 		if (ggtt->vm.mm.color_adjust)
1068 			ggtt->vm.mm.color_adjust(node, 0,
1069 						 &hole_start, &hole_end);
1070 		if (hole_start >= hole_end)
1071 			continue;
1072 
1073 		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
1074 		if (err)
1075 			break;
1076 
1077 		/* The exerciser may have changed the drm_mm, so restart the hole walk */
1078 		last = hole_end;
1079 		goto restart;
1080 	}
1081 	mutex_unlock(&i915->drm.struct_mutex);
1082 
1083 	return err;
1084 }
1085 
1086 static int igt_ggtt_fill(void *arg)
1087 {
1088 	return exercise_ggtt(arg, fill_hole);
1089 }
1090 
1091 static int igt_ggtt_walk(void *arg)
1092 {
1093 	return exercise_ggtt(arg, walk_hole);
1094 }
1095 
1096 static int igt_ggtt_pot(void *arg)
1097 {
1098 	return exercise_ggtt(arg, pot_hole);
1099 }
1100 
1101 static int igt_ggtt_drunk(void *arg)
1102 {
1103 	return exercise_ggtt(arg, drunk_hole);
1104 }
1105 
1106 static int igt_ggtt_lowlevel(void *arg)
1107 {
1108 	return exercise_ggtt(arg, lowlevel_hole);
1109 }
1110 
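/*
 * Scribble-test vm->insert_page(): map every page of a temporary range in
 * the mappable aperture to the same physical page, write a distinct dword
 * through each mapping in one random order and read it back through the
 * mappings in another, so any PTE that was not programmed as expected shows
 * up as a mismatch.
 */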
1111 static int igt_ggtt_page(void *arg)
1112 {
1113 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1114 	I915_RND_STATE(prng);
1115 	struct drm_i915_private *i915 = arg;
1116 	struct i915_ggtt *ggtt = &i915->ggtt;
1117 	struct drm_i915_gem_object *obj;
1118 	struct drm_mm_node tmp;
1119 	unsigned int *order, n;
1120 	int err;
1121 
1122 	mutex_lock(&i915->drm.struct_mutex);
1123 
1124 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1125 	if (IS_ERR(obj)) {
1126 		err = PTR_ERR(obj);
1127 		goto out_unlock;
1128 	}
1129 
1130 	err = i915_gem_object_pin_pages(obj);
1131 	if (err)
1132 		goto out_free;
1133 
1134 	memset(&tmp, 0, sizeof(tmp));
1135 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1136 					  count * PAGE_SIZE, 0,
1137 					  I915_COLOR_UNEVICTABLE,
1138 					  0, ggtt->mappable_end,
1139 					  DRM_MM_INSERT_LOW);
1140 	if (err)
1141 		goto out_unpin;
1142 
1143 	intel_runtime_pm_get(i915);
1144 
1145 	for (n = 0; n < count; n++) {
1146 		u64 offset = tmp.start + n * PAGE_SIZE;
1147 
1148 		ggtt->vm.insert_page(&ggtt->vm,
1149 				     i915_gem_object_get_dma_address(obj, 0),
1150 				     offset, I915_CACHE_NONE, 0);
1151 	}
1152 
1153 	order = i915_random_order(count, &prng);
1154 	if (!order) {
1155 		err = -ENOMEM;
1156 		goto out_remove;
1157 	}
1158 
1159 	for (n = 0; n < count; n++) {
1160 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1161 		u32 __iomem *vaddr;
1162 
1163 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1164 		iowrite32(n, vaddr + n);
1165 		io_mapping_unmap_atomic(vaddr);
1166 	}
1167 	i915_gem_flush_ggtt_writes(i915);
1168 
1169 	i915_random_reorder(order, count, &prng);
1170 	for (n = 0; n < count; n++) {
1171 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1172 		u32 __iomem *vaddr;
1173 		u32 val;
1174 
1175 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1176 		val = ioread32(vaddr + n);
1177 		io_mapping_unmap_atomic(vaddr);
1178 
1179 		if (val != n) {
1180 			pr_err("insert page failed: found %d, expected %d\n",
1181 			       val, n);
1182 			err = -EINVAL;
1183 			break;
1184 		}
1185 	}
1186 
1187 	kfree(order);
1188 out_remove:
1189 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1190 	intel_runtime_pm_put(i915);
1191 	drm_mm_remove_node(&tmp);
1192 out_unpin:
1193 	i915_gem_object_unpin_pages(obj);
1194 out_free:
1195 	i915_gem_object_put(obj);
1196 out_unlock:
1197 	mutex_unlock(&i915->drm.struct_mutex);
1198 	return err;
1199 }
1200 
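/*
 * Fake the side effects of binding a vma on the mock device: take a pages
 * reference, bump the object's bind count and move the vma onto the VM's
 * inactive list, so that later eviction sees a plausible state.
 */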
1201 static void track_vma_bind(struct i915_vma *vma)
1202 {
1203 	struct drm_i915_gem_object *obj = vma->obj;
1204 
1205 	obj->bind_count++; /* track for eviction later */
1206 	__i915_gem_object_pin_pages(obj);
1207 
1208 	vma->pages = obj->mm.pages;
1209 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1210 }
1211 
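/*
 * Run the given hole exerciser over the full range of a mock context's
 * ppgtt, i.e. against the mock device rather than real hardware.
 */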
1212 static int exercise_mock(struct drm_i915_private *i915,
1213 			 int (*func)(struct drm_i915_private *i915,
1214 				     struct i915_address_space *vm,
1215 				     u64 hole_start, u64 hole_end,
1216 				     unsigned long end_time))
1217 {
1218 	struct i915_gem_context *ctx;
1219 	struct i915_hw_ppgtt *ppgtt;
1220 	IGT_TIMEOUT(end_time);
1221 	int err;
1222 
1223 	ctx = mock_context(i915, "mock");
1224 	if (!ctx)
1225 		return -ENOMEM;
1226 
1227 	ppgtt = ctx->ppgtt;
1228 	GEM_BUG_ON(!ppgtt);
1229 
1230 	err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
1231 
1232 	mock_context_close(ctx);
1233 	return err;
1234 }
1235 
1236 static int igt_mock_fill(void *arg)
1237 {
1238 	return exercise_mock(arg, fill_hole);
1239 }
1240 
1241 static int igt_mock_walk(void *arg)
1242 {
1243 	return exercise_mock(arg, walk_hole);
1244 }
1245 
1246 static int igt_mock_pot(void *arg)
1247 {
1248 	return exercise_mock(arg, pot_hole);
1249 }
1250 
1251 static int igt_mock_drunk(void *arg)
1252 {
1253 	return exercise_mock(arg, drunk_hole);
1254 }
1255 
1256 static int igt_gtt_reserve(void *arg)
1257 {
1258 	struct drm_i915_private *i915 = arg;
1259 	struct drm_i915_gem_object *obj, *on;
1260 	LIST_HEAD(objects);
1261 	u64 total;
1262 	int err = -ENODEV;
1263 
1264 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1265 	 * for the node, and evicts if it has to. So our test checks that
1266 	 * it can give us the requested space and prevent overlaps.
1267 	 */
1268 
1269 	/* Start by filling the GGTT */
1270 	for (total = 0;
1271 	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1272 	     total += 2*I915_GTT_PAGE_SIZE) {
1273 		struct i915_vma *vma;
1274 
1275 		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1276 		if (IS_ERR(obj)) {
1277 			err = PTR_ERR(obj);
1278 			goto out;
1279 		}
1280 
1281 		err = i915_gem_object_pin_pages(obj);
1282 		if (err) {
1283 			i915_gem_object_put(obj);
1284 			goto out;
1285 		}
1286 
1287 		list_add(&obj->st_link, &objects);
1288 
1289 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1290 		if (IS_ERR(vma)) {
1291 			err = PTR_ERR(vma);
1292 			goto out;
1293 		}
1294 
1295 		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1296 					   obj->base.size,
1297 					   total,
1298 					   obj->cache_level,
1299 					   0);
1300 		if (err) {
1301 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1302 			       total, i915->ggtt.vm.total, err);
1303 			goto out;
1304 		}
1305 		track_vma_bind(vma);
1306 
1307 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1308 		if (vma->node.start != total ||
1309 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1310 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1311 			       vma->node.start, vma->node.size,
1312 			       total, 2*I915_GTT_PAGE_SIZE);
1313 			err = -EINVAL;
1314 			goto out;
1315 		}
1316 	}
1317 
1318 	/* Now we start forcing evictions */
1319 	for (total = I915_GTT_PAGE_SIZE;
1320 	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1321 	     total += 2*I915_GTT_PAGE_SIZE) {
1322 		struct i915_vma *vma;
1323 
1324 		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1325 		if (IS_ERR(obj)) {
1326 			err = PTR_ERR(obj);
1327 			goto out;
1328 		}
1329 
1330 		err = i915_gem_object_pin_pages(obj);
1331 		if (err) {
1332 			i915_gem_object_put(obj);
1333 			goto out;
1334 		}
1335 
1336 		list_add(&obj->st_link, &objects);
1337 
1338 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1339 		if (IS_ERR(vma)) {
1340 			err = PTR_ERR(vma);
1341 			goto out;
1342 		}
1343 
1344 		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1345 					   obj->base.size,
1346 					   total,
1347 					   obj->cache_level,
1348 					   0);
1349 		if (err) {
1350 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1351 			       total, i915->ggtt.vm.total, err);
1352 			goto out;
1353 		}
1354 		track_vma_bind(vma);
1355 
1356 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1357 		if (vma->node.start != total ||
1358 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1359 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1360 			       vma->node.start, vma->node.size,
1361 			       total, 2*I915_GTT_PAGE_SIZE);
1362 			err = -EINVAL;
1363 			goto out;
1364 		}
1365 	}
1366 
1367 	/* And then try at random */
1368 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1369 		struct i915_vma *vma;
1370 		u64 offset;
1371 
1372 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1373 		if (IS_ERR(vma)) {
1374 			err = PTR_ERR(vma);
1375 			goto out;
1376 		}
1377 
1378 		err = i915_vma_unbind(vma);
1379 		if (err) {
1380 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1381 			goto out;
1382 		}
1383 
1384 		offset = random_offset(0, i915->ggtt.vm.total,
1385 				       2*I915_GTT_PAGE_SIZE,
1386 				       I915_GTT_MIN_ALIGNMENT);
1387 
1388 		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1389 					   obj->base.size,
1390 					   offset,
1391 					   obj->cache_level,
1392 					   0);
1393 		if (err) {
1394 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1395 			       total, i915->ggtt.vm.total, err);
1396 			goto out;
1397 		}
1398 		track_vma_bind(vma);
1399 
1400 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1401 		if (vma->node.start != offset ||
1402 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1403 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1404 			       vma->node.start, vma->node.size,
1405 			       offset, 2*I915_GTT_PAGE_SIZE);
1406 			err = -EINVAL;
1407 			goto out;
1408 		}
1409 	}
1410 
1411 out:
1412 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1413 		i915_gem_object_unpin_pages(obj);
1414 		i915_gem_object_put(obj);
1415 	}
1416 	return err;
1417 }
1418 
1419 static int igt_gtt_insert(void *arg)
1420 {
1421 	struct drm_i915_private *i915 = arg;
1422 	struct drm_i915_gem_object *obj, *on;
1423 	struct drm_mm_node tmp = {};
1424 	const struct invalid_insert {
1425 		u64 size;
1426 		u64 alignment;
1427 		u64 start, end;
1428 	} invalid_insert[] = {
1429 		{
1430 			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
1431 			0, i915->ggtt.vm.total,
1432 		},
1433 		{
1434 			2*I915_GTT_PAGE_SIZE, 0,
1435 			0, I915_GTT_PAGE_SIZE,
1436 		},
1437 		{
1438 			-(u64)I915_GTT_PAGE_SIZE, 0,
1439 			0, 4*I915_GTT_PAGE_SIZE,
1440 		},
1441 		{
1442 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1443 			0, 4*I915_GTT_PAGE_SIZE,
1444 		},
1445 		{
1446 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1447 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1448 		},
1449 		{}
1450 	}, *ii;
1451 	LIST_HEAD(objects);
1452 	u64 total;
1453 	int err = -ENODEV;
1454 
1455 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1456 	 * for the node, evicting if required.
1457 	 */
1458 
1459 	/* Check a couple of obviously invalid requests */
1460 	for (ii = invalid_insert; ii->size; ii++) {
1461 		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
1462 					  ii->size, ii->alignment,
1463 					  I915_COLOR_UNEVICTABLE,
1464 					  ii->start, ii->end,
1465 					  0);
1466 		if (err != -ENOSPC) {
1467 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1468 			       ii->size, ii->alignment, ii->start, ii->end,
1469 			       err);
1470 			return -EINVAL;
1471 		}
1472 	}
1473 
1474 	/* Start by filling the GGTT */
1475 	for (total = 0;
1476 	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1477 	     total += I915_GTT_PAGE_SIZE) {
1478 		struct i915_vma *vma;
1479 
1480 		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
1481 		if (IS_ERR(obj)) {
1482 			err = PTR_ERR(obj);
1483 			goto out;
1484 		}
1485 
1486 		err = i915_gem_object_pin_pages(obj);
1487 		if (err) {
1488 			i915_gem_object_put(obj);
1489 			goto out;
1490 		}
1491 
1492 		list_add(&obj->st_link, &objects);
1493 
1494 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1495 		if (IS_ERR(vma)) {
1496 			err = PTR_ERR(vma);
1497 			goto out;
1498 		}
1499 
1500 		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1501 					  obj->base.size, 0, obj->cache_level,
1502 					  0, i915->ggtt.vm.total,
1503 					  0);
1504 		if (err == -ENOSPC) {
1505 			/* maxed out the GGTT space */
1506 			i915_gem_object_put(obj);
1507 			break;
1508 		}
1509 		if (err) {
1510 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1511 			       total, i915->ggtt.vm.total, err);
1512 			goto out;
1513 		}
1514 		track_vma_bind(vma);
1515 		__i915_vma_pin(vma);
1516 
1517 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1518 	}
1519 
1520 	list_for_each_entry(obj, &objects, st_link) {
1521 		struct i915_vma *vma;
1522 
1523 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1524 		if (IS_ERR(vma)) {
1525 			err = PTR_ERR(vma);
1526 			goto out;
1527 		}
1528 
1529 		if (!drm_mm_node_allocated(&vma->node)) {
1530 			pr_err("VMA was unexpectedly evicted!\n");
1531 			err = -EINVAL;
1532 			goto out;
1533 		}
1534 
1535 		__i915_vma_unpin(vma);
1536 	}
1537 
1538 	/* If we then reinsert, we should find the same hole */
1539 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1540 		struct i915_vma *vma;
1541 		u64 offset;
1542 
1543 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1544 		if (IS_ERR(vma)) {
1545 			err = PTR_ERR(vma);
1546 			goto out;
1547 		}
1548 
1549 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1550 		offset = vma->node.start;
1551 
1552 		err = i915_vma_unbind(vma);
1553 		if (err) {
1554 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1555 			goto out;
1556 		}
1557 
1558 		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1559 					  obj->base.size, 0, obj->cache_level,
1560 					  0, i915->ggtt.vm.total,
1561 					  0);
1562 		if (err) {
1563 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1564 			       total, i915->ggtt.vm.total, err);
1565 			goto out;
1566 		}
1567 		track_vma_bind(vma);
1568 
1569 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1570 		if (vma->node.start != offset) {
1571 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1572 			       offset, vma->node.start);
1573 			err = -EINVAL;
1574 			goto out;
1575 		}
1576 	}
1577 
1578 	/* And then force evictions */
1579 	for (total = 0;
1580 	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1581 	     total += 2*I915_GTT_PAGE_SIZE) {
1582 		struct i915_vma *vma;
1583 
1584 		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
1585 		if (IS_ERR(obj)) {
1586 			err = PTR_ERR(obj);
1587 			goto out;
1588 		}
1589 
1590 		err = i915_gem_object_pin_pages(obj);
1591 		if (err) {
1592 			i915_gem_object_put(obj);
1593 			goto out;
1594 		}
1595 
1596 		list_add(&obj->st_link, &objects);
1597 
1598 		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1599 		if (IS_ERR(vma)) {
1600 			err = PTR_ERR(vma);
1601 			goto out;
1602 		}
1603 
1604 		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1605 					  obj->base.size, 0, obj->cache_level,
1606 					  0, i915->ggtt.vm.total,
1607 					  0);
1608 		if (err) {
1609 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1610 			       total, i915->ggtt.vm.total, err);
1611 			goto out;
1612 		}
1613 		track_vma_bind(vma);
1614 
1615 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1616 	}
1617 
1618 out:
1619 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1620 		i915_gem_object_unpin_pages(obj);
1621 		i915_gem_object_put(obj);
1622 	}
1623 	return err;
1624 }
1625 
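/*
 * The mock selftests run against a mock GEM device, so only the software
 * side of GTT management is exercised and no hardware is required.
 */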
1626 int i915_gem_gtt_mock_selftests(void)
1627 {
1628 	static const struct i915_subtest tests[] = {
1629 		SUBTEST(igt_mock_drunk),
1630 		SUBTEST(igt_mock_walk),
1631 		SUBTEST(igt_mock_pot),
1632 		SUBTEST(igt_mock_fill),
1633 		SUBTEST(igt_gtt_reserve),
1634 		SUBTEST(igt_gtt_insert),
1635 	};
1636 	struct drm_i915_private *i915;
1637 	int err;
1638 
1639 	i915 = mock_gem_device();
1640 	if (!i915)
1641 		return -ENOMEM;
1642 
1643 	mutex_lock(&i915->drm.struct_mutex);
1644 	err = i915_subtests(tests, i915);
1645 	mutex_unlock(&i915->drm.struct_mutex);
1646 
1647 	drm_dev_unref(&i915->drm);
1648 	return err;
1649 }
1650 
1651 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1652 {
1653 	static const struct i915_subtest tests[] = {
1654 		SUBTEST(igt_ppgtt_alloc),
1655 		SUBTEST(igt_ppgtt_lowlevel),
1656 		SUBTEST(igt_ppgtt_drunk),
1657 		SUBTEST(igt_ppgtt_walk),
1658 		SUBTEST(igt_ppgtt_pot),
1659 		SUBTEST(igt_ppgtt_fill),
1660 		SUBTEST(igt_ppgtt_shrink),
1661 		SUBTEST(igt_ppgtt_shrink_boom),
1662 		SUBTEST(igt_ggtt_lowlevel),
1663 		SUBTEST(igt_ggtt_drunk),
1664 		SUBTEST(igt_ggtt_walk),
1665 		SUBTEST(igt_ggtt_pot),
1666 		SUBTEST(igt_ggtt_fill),
1667 		SUBTEST(igt_ggtt_page),
1668 	};
1669 
1670 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
1671 
1672 	return i915_subtests(tests, i915);
1673 }
1674