/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

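/*
 * Return the largest GTT page size supported by the device that still fits
 * within the remaining length @rem, or 0 if none does.
 */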
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

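/* Release the backing pages and the sg_table built by get_huge_pages(). */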
static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

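/*
 * Back the object with real pages, using every page size set in
 * obj->mm.page_mask and filling greedily from largest to smallest.
 */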
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: greedily fill the object from largest to
	 * smallest page size, while ensuring that we use *every* page size
	 * set in the given page mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

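/*
 * Create a CPU-domain object whose backing store will be allocated with the
 * mix of real page sizes given in @page_mask (see get_huge_pages()).
 */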
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

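/*
 * Build a fake sg_table with no real backing pages; each entry is sized
 * using the largest supported page size that fits the remainder, and the
 * dma address is simply set to the chosen page size as a dummy value.
 */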
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

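/*
 * Like fake_get_huge_pages(), but describe the whole object with a single
 * sg entry.
 */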
static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

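/*
 * Create a fake object (no real backing pages), optionally described by a
 * single sg entry covering the whole object.
 */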
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

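/*
 * Verify that the vma and object page-size tracking is self-consistent and
 * only contains page sizes supported by the device.
 */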
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

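/* Unpin, drop the pages and release every object accumulated on the list. */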
static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- we only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

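/*
 * Move the object to the GTT domain, then use the GPU to write @val into
 * dword @dw of every page spanned by the vma.
 */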
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

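/* Check that dword @dword of every page reads back as @val via the struct pages. */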
static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

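/*
 * Bind the object at the given fixed offset, write a per-page dword with the
 * GPU and then verify the result from the CPU.
 */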
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved, so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

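/*
 * Exercise GPU writes at various offsets across all engines that can store a
 * dword, verifying each write from the CPU.
 */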
static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * time out -- we want to avoid issues hidden by effectively always
	 * using offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary; however, to improve coverage we opt for testing
		 * both aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

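/*
 * Pick a random page-aligned size in the range
 * [min_page_size, 2 * max_page_size).
 */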
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs; however, we should keep
	 * this to limit the chances of introducing a temporary regression by
	 * testing the most obvious cases that might make something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if, on the off chance, we encounter a
	 * failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err = 0;

	/*
	 * Sanity check shrinking a huge-paged object -- make sure nothing
	 * blows up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned*, shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_put;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	drm_dev_put(&dev_priv->drm);
	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);
	mutex_unlock(&ctx->mutex);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}