/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

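/*
 * Return the largest page size supported by the (possibly mocked) device
 * that still fits in the remaining @rem bytes, or 0 if none does.
 */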
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

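/*
 * Backing store provider for the "huge-gem" objects below: allocate real
 * pages in chunks, walking obj->mm.page_mask from the largest set bit
 * downwards so that every selected page size contributes at least one sg
 * chunk. For example, a page_mask of SZ_2M | SZ_64K | SZ_4K on a
 * 2M + 64K + 4K object yields exactly one chunk of each size.
 */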
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: greedily fill the object from largest to
	 * smallest page-size, while ensuring that we use *every* page-size
	 * as per the given page-mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

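/*
 * Create a "huge-gem" object whose backing store is built by
 * get_huge_pages() above; @page_mask selects which page sizes must appear
 * in the resulting sg list.
 */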
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	obj->mm.page_mask = page_mask;

	return obj;
}

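/*
 * The "fake-gem" flavour never touches real memory: each sg entry carries a
 * made-up dma address (we simply stash the page size in sg_dma_address),
 * which should be enough to exercise the GTT insertion paths on the mock
 * device.
 */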
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

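/*
 * Check that the vma's view of its page sizes (sg, gtt, phys) is
 * self-consistent, matches the backing object, and only uses page sizes the
 * device claims to support. Call this after pinning; it waits for the async
 * bind to complete before asserting anything.
 */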
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	/*
	 * The dma-api is like a box of chocolates when it comes to the
	 * alignment of dma addresses. For LMEM, however, we have total
	 * control and so can guarantee alignment. Likewise, the blocks we
	 * allocate should appear in descending order, and since we know that
	 * we align to the largest page size for the GTT address, if we see
	 * 2M physical pages we should also get 2M GTT pages. If we don't,
	 * something might be wrong in our construction of the backing pages.
	 *
	 * Maintaining alignment is required to utilise huge pages in the
	 * ppGTT.
	 */
	if (i915_gem_object_is_lmem(obj) &&
	    IS_ALIGNED(vma->node.start, SZ_2M) &&
	    vma->page_sizes.sg & SZ_2M &&
	    vma->page_sizes.gtt < SZ_2M) {
		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
		       vma->page_sizes.sg, vma->page_sizes.gtt);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K; /* Required for ppGTT */

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

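/*
 * Allocate one object per supported page size from a mock memory region,
 * with and without I915_BO_ALLOC_CONTIGUOUS, and check both the dma
 * alignment of the backing store and the GTT page size chosen for the
 * binding.
 */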
static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem,
							    page_size, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}
}

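/*
 * Fill the ppGTT with objects of every prime number of pages up to the vm
 * size, alternating between single-sg and multi-sg backing, and verify that
 * the GTT page sizes chosen for each binding match what the sg layout
 * implies.
 */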
static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- we only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

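/*
 * Write @val into dword @dw of every page of @vma on the GPU, using the
 * given context's engine via igt_gpu_fill_dw().
 */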
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

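/*
 * CPU readback of dword @dword in every page: shmem-backed objects are
 * checked page by page via kmap (with any needed clflushes), everything
 * else through a WC pin_map.
 */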
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

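/*
 * Bind the object at the given fixed @offset, sanity check the resulting
 * page sizes, then do a GPU write followed by a CPU readback to prove the
 * write landed where we expect.
 */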
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

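/*
 * Hammer __igt_write_huge() across a randomised permutation of the
 * context's engines, at ascending/descending fixed offsets (prime multiples
 * of the largest page size), until we run out of time.
 */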
static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding the same engine a few
	 * times in succession a possibility by enlarging the permutation
	 * array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * time out -- we want to avoid issues hidden by effectively always
	 * using offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary; however, to improve coverage we opt for testing
		 * both aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

	return err;
}

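/*
 * Backend constructors for the tests below: each returns an object of the
 * requested size from a different allocator (shmem/THP, internal, the mock
 * "huge-gem" above, or device local memory).
 */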
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

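/*
 * Pick a page-aligned size in [min_page_size, 2 * max_page_size), with the
 * min_page_size bit forced on so the result is never too small.
 */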
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs; however, we should keep
	 * this test to limit the chances of introducing a temporary
	 * regression, by testing the most obvious cases that might make
	 * something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling
	 * back to tmpfs, which we rely on if, on the off-chance, we encounter
	 * a failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	bool should_swap;
	int err = 0;

	/*
	 * Sanity check shrinking a huge-paged object -- make sure nothing
	 * blows up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	/*
	 * Nuke everything *before* we unpin the pages so that we can be
	 * reasonably sure that, when we later check get_nr_swap_pages(), no
	 * random leftover object is stealing the remaining swap space.
	 */
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned*, shrinking should invoke shmem
	 * to truncate our pages, if we have available swap.
	 */
	should_swap = get_nr_swap_pages() > 0;
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE |
			I915_SHRINK_WRITEBACK);
	if (should_swap == i915_gem_object_has_pages(obj)) {
		pr_err("unexpected pages mismatch, should_swap=%s\n",
		       yesno(should_swap));
		err = -EINVAL;
		goto out_put;
	}

	if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
		pr_err("unexpected residual page-size bits, should_swap=%s\n",
		       yesno(should_swap));
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);
	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	vm = ctx->vm;
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}