/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

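/*
 * Fake backing storage: every scatterlist entry points at the same biased
 * pfn, so arbitrarily large "objects" can be created without allocating
 * real memory; only the PTE writes into the GTT are exercised. The pages
 * are marked I915_MADV_DONTNEED as there is no data worth preserving.
 */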
static struct sg_table *
fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;
	return pages;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

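/* Create a GEM object of the given size, backed by the fake pages above. */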
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}

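/*
 * Exercise the low-level vm hooks (allocate_va_range, insert_entries and
 * clear_range) directly, bypassing the VMA layer: fill the hole with
 * progressively larger objects inserted in a random order.
 */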
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			vm->insert_entries(vm, obj->mm.pages, addr,
					   I915_CACHE_NONE, 0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

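/* Unbind and release every object created by a hole exerciser. */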
static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

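/*
 * Fill the hole with objects of prime-scaled sizes, packing them against
 * both ends of the hole, then verify their placement and unbind them
 * again while walking the object list in both directions.
 */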
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

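/* Walk a single object of every prime page count across the hole. */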
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

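/*
 * Check that a two-page object can be pinned so that it straddles every
 * power-of-two boundary within the hole.
 */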
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

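/*
 * Like lowlevel_hole, but go through the full VMA API: pin and unbind a
 * single object of each power-of-two size at randomised offsets inside
 * the hole.
 */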
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = 0;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}

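/*
 * Pack the hole with objects of doubling size; combined with the fault
 * injection set up by shrink_hole() below, this should poke at the error
 * paths of the ppgtt allocators.
 */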
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until the hole is filled */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

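/* Run a hole exerciser over the full range of a freshly created ppgtt. */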
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

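/* Order the hole list by ascending address for exercise_ggtt() below. */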
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

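/*
 * Map a single backing page at many offsets inside a reserved GGTT node
 * using insert_page(), write a distinct dword through each mapping and
 * then read the values back in a different random order to check that
 * every PTE pointed where we expected.
 */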
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		wmb();
		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

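/*
 * i915_gem_gtt_reserve/insert only allocate the drm_mm node; fake just
 * enough of a real binding (pinned pages, bind count, inactive list) so
 * that eviction can operate on these mock VMA.
 */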
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}