1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2017 Intel Corporation
5  */
6 
7 #include <linux/prime_numbers.h>
8 
9 #include "i915_selftest.h"
10 
11 #include "gem/i915_gem_region.h"
12 #include "gem/i915_gem_lmem.h"
13 #include "gem/i915_gem_pm.h"
14 
15 #include "gt/intel_gt.h"
16 
17 #include "igt_gem_utils.h"
18 #include "mock_context.h"
19 
20 #include "selftests/mock_drm.h"
21 #include "selftests/mock_gem_device.h"
22 #include "selftests/mock_region.h"
23 #include "selftests/i915_random.h"
24 
25 static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
26 					     struct file *file)
27 {
28 	struct i915_gem_context *ctx = live_context(i915, file);
29 	struct i915_address_space *vm;
30 
31 	if (IS_ERR(ctx))
32 		return ctx;
33 
34 	vm = ctx->vm;
35 	if (vm)
36 		WRITE_ONCE(vm->scrub_64K, true);
37 
38 	return ctx;
39 }
40 
41 static const unsigned int page_sizes[] = {
42 	I915_GTT_PAGE_SIZE_2M,
43 	I915_GTT_PAGE_SIZE_64K,
44 	I915_GTT_PAGE_SIZE_4K,
45 };
46 
47 static unsigned int get_largest_page_size(struct drm_i915_private *i915,
48 					  u64 rem)
49 {
50 	int i;
51 
52 	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
53 		unsigned int page_size = page_sizes[i];
54 
55 		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
56 			return page_size;
57 	}
58 
59 	return 0;
60 }
61 
62 static void huge_pages_free_pages(struct sg_table *st)
63 {
64 	struct scatterlist *sg;
65 
66 	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
67 		if (sg_page(sg))
68 			__free_pages(sg_page(sg), get_order(sg->length));
69 	}
70 
71 	sg_free_table(st);
72 	kfree(st);
73 }
74 
75 static int get_huge_pages(struct drm_i915_gem_object *obj)
76 {
77 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
78 	unsigned int page_mask = obj->mm.page_mask;
79 	struct sg_table *st;
80 	struct scatterlist *sg;
81 	unsigned int sg_page_sizes;
82 	u64 rem;
83 
84 	st = kmalloc(sizeof(*st), GFP);
85 	if (!st)
86 		return -ENOMEM;
87 
88 	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
89 		kfree(st);
90 		return -ENOMEM;
91 	}
92 
93 	rem = obj->base.size;
94 	sg = st->sgl;
95 	st->nents = 0;
96 	sg_page_sizes = 0;
97 
98 	/*
99 	 * Our goal here is simple, we want to greedily fill the object from
100 	 * largest to smallest page-size, while ensuring that we use *every*
101 	 * page-size as per the given page-mask.
102 	 */
103 	do {
104 		unsigned int bit = ilog2(page_mask);
105 		unsigned int page_size = BIT(bit);
106 		int order = get_order(page_size);
107 
108 		do {
109 			struct page *page;
110 
111 			GEM_BUG_ON(order >= MAX_ORDER);
112 			page = alloc_pages(GFP | __GFP_ZERO, order);
113 			if (!page)
114 				goto err;
115 
116 			sg_set_page(sg, page, page_size, 0);
117 			sg_page_sizes |= page_size;
118 			st->nents++;
119 
120 			rem -= page_size;
121 			if (!rem) {
122 				sg_mark_end(sg);
123 				break;
124 			}
125 
126 			sg = __sg_next(sg);
127 		} while ((rem - ((page_size-1) & page_mask)) >= page_size);
128 
129 		page_mask &= (page_size-1);
130 	} while (page_mask);
131 
132 	if (i915_gem_gtt_prepare_pages(obj, st))
133 		goto err;
134 
135 	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
136 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
137 
138 	return 0;
139 
140 err:
141 	sg_set_page(sg, NULL, 0, 0);
142 	sg_mark_end(sg);
143 	huge_pages_free_pages(st);
144 
145 	return -ENOMEM;
146 }
147 
148 static void put_huge_pages(struct drm_i915_gem_object *obj,
149 			   struct sg_table *pages)
150 {
151 	i915_gem_gtt_finish_pages(obj, pages);
152 	huge_pages_free_pages(pages);
153 
154 	obj->mm.dirty = false;
155 
156 	__start_cpu_write(obj);
157 }
158 
159 static const struct drm_i915_gem_object_ops huge_page_ops = {
160 	.name = "huge-gem",
161 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
162 	.get_pages = get_huge_pages,
163 	.put_pages = put_huge_pages,
164 };
165 
166 static struct drm_i915_gem_object *
167 huge_pages_object(struct drm_i915_private *i915,
168 		  u64 size,
169 		  unsigned int page_mask)
170 {
171 	static struct lock_class_key lock_class;
172 	struct drm_i915_gem_object *obj;
173 	unsigned int cache_level;
174 
175 	GEM_BUG_ON(!size);
176 	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
177 
178 	if (size >> PAGE_SHIFT > INT_MAX)
179 		return ERR_PTR(-E2BIG);
180 
181 	if (overflows_type(size, obj->base.size))
182 		return ERR_PTR(-E2BIG);
183 
184 	obj = i915_gem_object_alloc();
185 	if (!obj)
186 		return ERR_PTR(-ENOMEM);
187 
188 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
189 	i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
190 	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
191 	i915_gem_object_set_volatile(obj);
192 
193 	obj->write_domain = I915_GEM_DOMAIN_CPU;
194 	obj->read_domains = I915_GEM_DOMAIN_CPU;
195 
196 	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
197 	i915_gem_object_set_cache_coherency(obj, cache_level);
198 
199 	obj->mm.page_mask = page_mask;
200 
201 	return obj;
202 }
203 
204 static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
205 {
206 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
207 	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
208 	struct sg_table *st;
209 	struct scatterlist *sg;
210 	unsigned int sg_page_sizes;
211 	u64 rem;
212 
213 	st = kmalloc(sizeof(*st), GFP);
214 	if (!st)
215 		return -ENOMEM;
216 
217 	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
218 		kfree(st);
219 		return -ENOMEM;
220 	}
221 
222 	/* Use optimal page sized chunks to fill in the sg table */
223 	rem = obj->base.size;
224 	sg = st->sgl;
225 	st->nents = 0;
226 	sg_page_sizes = 0;
227 	do {
228 		unsigned int page_size = get_largest_page_size(i915, rem);
229 		unsigned int len = min(page_size * div_u64(rem, page_size),
230 				       max_len);
231 
232 		GEM_BUG_ON(!page_size);
233 
234 		sg->offset = 0;
235 		sg->length = len;
236 		sg_dma_len(sg) = len;
237 		sg_dma_address(sg) = page_size;
238 
239 		sg_page_sizes |= len;
240 
241 		st->nents++;
242 
243 		rem -= len;
244 		if (!rem) {
245 			sg_mark_end(sg);
246 			break;
247 		}
248 
249 		sg = sg_next(sg);
250 	} while (1);
251 
252 	i915_sg_trim(st);
253 
254 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
255 
256 	return 0;
257 }
258 
259 static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
260 {
261 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
262 	struct sg_table *st;
263 	struct scatterlist *sg;
264 	unsigned int page_size;
265 
266 	st = kmalloc(sizeof(*st), GFP);
267 	if (!st)
268 		return -ENOMEM;
269 
270 	if (sg_alloc_table(st, 1, GFP)) {
271 		kfree(st);
272 		return -ENOMEM;
273 	}
274 
275 	sg = st->sgl;
276 	st->nents = 1;
277 
278 	page_size = get_largest_page_size(i915, obj->base.size);
279 	GEM_BUG_ON(!page_size);
280 
281 	sg->offset = 0;
282 	sg->length = obj->base.size;
283 	sg_dma_len(sg) = obj->base.size;
284 	sg_dma_address(sg) = page_size;
285 
286 	__i915_gem_object_set_pages(obj, st, sg->length);
287 
288 	return 0;
289 #undef GFP
290 }
291 
292 static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
293 				 struct sg_table *pages)
294 {
295 	sg_free_table(pages);
296 	kfree(pages);
297 }
298 
299 static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
300 				struct sg_table *pages)
301 {
302 	fake_free_huge_pages(obj, pages);
303 	obj->mm.dirty = false;
304 }
305 
306 static const struct drm_i915_gem_object_ops fake_ops = {
307 	.name = "fake-gem",
308 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
309 	.get_pages = fake_get_huge_pages,
310 	.put_pages = fake_put_huge_pages,
311 };
312 
313 static const struct drm_i915_gem_object_ops fake_ops_single = {
314 	.name = "fake-gem",
315 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
316 	.get_pages = fake_get_huge_pages_single,
317 	.put_pages = fake_put_huge_pages,
318 };
319 
320 static struct drm_i915_gem_object *
321 fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
322 {
323 	static struct lock_class_key lock_class;
324 	struct drm_i915_gem_object *obj;
325 
326 	GEM_BUG_ON(!size);
327 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
328 
329 	if (size >> PAGE_SHIFT > UINT_MAX)
330 		return ERR_PTR(-E2BIG);
331 
332 	if (overflows_type(size, obj->base.size))
333 		return ERR_PTR(-E2BIG);
334 
335 	obj = i915_gem_object_alloc();
336 	if (!obj)
337 		return ERR_PTR(-ENOMEM);
338 
339 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
340 
341 	if (single)
342 		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
343 	else
344 		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
345 
346 	i915_gem_object_set_volatile(obj);
347 
348 	obj->write_domain = I915_GEM_DOMAIN_CPU;
349 	obj->read_domains = I915_GEM_DOMAIN_CPU;
350 	obj->cache_level = I915_CACHE_NONE;
351 
352 	return obj;
353 }
354 
355 static int igt_check_page_sizes(struct i915_vma *vma)
356 {
357 	struct drm_i915_private *i915 = vma->vm->i915;
358 	unsigned int supported = INTEL_INFO(i915)->page_sizes;
359 	struct drm_i915_gem_object *obj = vma->obj;
360 	int err;
361 
362 	/* We have to wait for the async bind to complete before our asserts */
363 	err = i915_vma_sync(vma);
364 	if (err)
365 		return err;
366 
367 	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
368 		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
369 		       vma->page_sizes.sg & ~supported, supported);
370 		err = -EINVAL;
371 	}
372 
373 	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
374 		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
375 		       vma->page_sizes.gtt & ~supported, supported);
376 		err = -EINVAL;
377 	}
378 
379 	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
380 		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
381 		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
382 		err = -EINVAL;
383 	}
384 
385 	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
386 		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
387 		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
388 		err = -EINVAL;
389 	}
390 
391 	/*
392 	 * The dma-api is like a box of chocolates when it comes to the
393 	 * alignment of dma addresses, however for LMEM we have total control
394 	 * and so can guarantee alignment, likewise when we allocate our blocks
395 	 * they should appear in descending order, and if we know that we align
396 	 * to the largest page size for the GTT address, we should be able to
397 	 * assert that if we see 2M physical pages then we should also get 2M
398 	 * GTT pages. If we don't then something might be wrong in our
399 	 * construction of the backing pages.
400 	 *
401 	 * Maintaining alignment is required to utilise huge pages in the ppGGT.
402 	 */
403 	if (i915_gem_object_is_lmem(obj) &&
404 	    IS_ALIGNED(vma->node.start, SZ_2M) &&
405 	    vma->page_sizes.sg & SZ_2M &&
406 	    vma->page_sizes.gtt < SZ_2M) {
407 		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
408 		       vma->page_sizes.sg, vma->page_sizes.gtt);
409 		err = -EINVAL;
410 	}
411 
412 	if (obj->mm.page_sizes.gtt) {
413 		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
414 		       obj->mm.page_sizes.gtt);
415 		err = -EINVAL;
416 	}
417 
418 	return err;
419 }
420 
421 static int igt_mock_exhaust_device_supported_pages(void *arg)
422 {
423 	struct i915_ppgtt *ppgtt = arg;
424 	struct drm_i915_private *i915 = ppgtt->vm.i915;
425 	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
426 	struct drm_i915_gem_object *obj;
427 	struct i915_vma *vma;
428 	int i, j, single;
429 	int err;
430 
431 	/*
432 	 * Sanity check creating objects with every valid page support
433 	 * combination for our mock device.
434 	 */
435 
436 	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
437 		unsigned int combination = SZ_4K; /* Required for ppGTT */
438 
439 		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
440 			if (i & BIT(j))
441 				combination |= page_sizes[j];
442 		}
443 
444 		mkwrite_device_info(i915)->page_sizes = combination;
445 
446 		for (single = 0; single <= 1; ++single) {
447 			obj = fake_huge_pages_object(i915, combination, !!single);
448 			if (IS_ERR(obj)) {
449 				err = PTR_ERR(obj);
450 				goto out_device;
451 			}
452 
453 			if (obj->base.size != combination) {
454 				pr_err("obj->base.size=%zu, expected=%u\n",
455 				       obj->base.size, combination);
456 				err = -EINVAL;
457 				goto out_put;
458 			}
459 
460 			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
461 			if (IS_ERR(vma)) {
462 				err = PTR_ERR(vma);
463 				goto out_put;
464 			}
465 
466 			err = i915_vma_pin(vma, 0, 0, PIN_USER);
467 			if (err)
468 				goto out_put;
469 
470 			err = igt_check_page_sizes(vma);
471 
472 			if (vma->page_sizes.sg != combination) {
473 				pr_err("page_sizes.sg=%u, expected=%u\n",
474 				       vma->page_sizes.sg, combination);
475 				err = -EINVAL;
476 			}
477 
478 			i915_vma_unpin(vma);
479 			i915_gem_object_put(obj);
480 
481 			if (err)
482 				goto out_device;
483 		}
484 	}
485 
486 	goto out_device;
487 
488 out_put:
489 	i915_gem_object_put(obj);
490 out_device:
491 	mkwrite_device_info(i915)->page_sizes = saved_mask;
492 
493 	return err;
494 }
495 
496 static int igt_mock_memory_region_huge_pages(void *arg)
497 {
498 	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
499 	struct i915_ppgtt *ppgtt = arg;
500 	struct drm_i915_private *i915 = ppgtt->vm.i915;
501 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
502 	struct intel_memory_region *mem;
503 	struct drm_i915_gem_object *obj;
504 	struct i915_vma *vma;
505 	int bit;
506 	int err = 0;
507 
508 	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
509 	if (IS_ERR(mem)) {
510 		pr_err("%s failed to create memory region\n", __func__);
511 		return PTR_ERR(mem);
512 	}
513 
514 	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
515 		unsigned int page_size = BIT(bit);
516 		resource_size_t phys;
517 		int i;
518 
519 		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
520 			obj = i915_gem_object_create_region(mem,
521 							    page_size, page_size,
522 							    flags[i]);
523 			if (IS_ERR(obj)) {
524 				err = PTR_ERR(obj);
525 				goto out_region;
526 			}
527 
528 			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
529 			if (IS_ERR(vma)) {
530 				err = PTR_ERR(vma);
531 				goto out_put;
532 			}
533 
534 			err = i915_vma_pin(vma, 0, 0, PIN_USER);
535 			if (err)
536 				goto out_put;
537 
538 			err = igt_check_page_sizes(vma);
539 			if (err)
540 				goto out_unpin;
541 
542 			phys = i915_gem_object_get_dma_address(obj, 0);
543 			if (!IS_ALIGNED(phys, page_size)) {
544 				pr_err("%s addr misaligned(%pa) page_size=%u\n",
545 				       __func__, &phys, page_size);
546 				err = -EINVAL;
547 				goto out_unpin;
548 			}
549 
550 			if (vma->page_sizes.gtt != page_size) {
551 				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
552 				       __func__, vma->page_sizes.gtt,
553 				       page_size);
554 				err = -EINVAL;
555 				goto out_unpin;
556 			}
557 
558 			i915_vma_unpin(vma);
559 			__i915_gem_object_put_pages(obj);
560 			i915_gem_object_put(obj);
561 		}
562 	}
563 
564 	goto out_region;
565 
566 out_unpin:
567 	i915_vma_unpin(vma);
568 out_put:
569 	i915_gem_object_put(obj);
570 out_region:
571 	intel_memory_region_destroy(mem);
572 	return err;
573 }
574 
575 static int igt_mock_ppgtt_misaligned_dma(void *arg)
576 {
577 	struct i915_ppgtt *ppgtt = arg;
578 	struct drm_i915_private *i915 = ppgtt->vm.i915;
579 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
580 	struct drm_i915_gem_object *obj;
581 	int bit;
582 	int err;
583 
584 	/*
585 	 * Sanity check dma misalignment for huge pages -- the dma addresses we
586 	 * insert into the paging structures need to always respect the page
587 	 * size alignment.
588 	 */
589 
590 	bit = ilog2(I915_GTT_PAGE_SIZE_64K);
591 
592 	for_each_set_bit_from(bit, &supported,
593 			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
594 		IGT_TIMEOUT(end_time);
595 		unsigned int page_size = BIT(bit);
596 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
597 		unsigned int offset;
598 		unsigned int size =
599 			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
600 		struct i915_vma *vma;
601 
602 		obj = fake_huge_pages_object(i915, size, true);
603 		if (IS_ERR(obj))
604 			return PTR_ERR(obj);
605 
606 		if (obj->base.size != size) {
607 			pr_err("obj->base.size=%zu, expected=%u\n",
608 			       obj->base.size, size);
609 			err = -EINVAL;
610 			goto out_put;
611 		}
612 
613 		err = i915_gem_object_pin_pages_unlocked(obj);
614 		if (err)
615 			goto out_put;
616 
617 		/* Force the page size for this object */
618 		obj->mm.page_sizes.sg = page_size;
619 
620 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
621 		if (IS_ERR(vma)) {
622 			err = PTR_ERR(vma);
623 			goto out_unpin;
624 		}
625 
626 		err = i915_vma_pin(vma, 0, 0, flags);
627 		if (err)
628 			goto out_unpin;
629 
630 
631 		err = igt_check_page_sizes(vma);
632 
633 		if (vma->page_sizes.gtt != page_size) {
634 			pr_err("page_sizes.gtt=%u, expected %u\n",
635 			       vma->page_sizes.gtt, page_size);
636 			err = -EINVAL;
637 		}
638 
639 		i915_vma_unpin(vma);
640 
641 		if (err)
642 			goto out_unpin;
643 
644 		/*
645 		 * Try all the other valid offsets until the next
646 		 * boundary -- should always fall back to using 4K
647 		 * pages.
648 		 */
649 		for (offset = 4096; offset < page_size; offset += 4096) {
650 			err = i915_vma_unbind(vma);
651 			if (err)
652 				goto out_unpin;
653 
654 			err = i915_vma_pin(vma, 0, 0, flags | offset);
655 			if (err)
656 				goto out_unpin;
657 
658 			err = igt_check_page_sizes(vma);
659 
660 			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
661 				pr_err("page_sizes.gtt=%u, expected %llu\n",
662 				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
663 				err = -EINVAL;
664 			}
665 
666 			i915_vma_unpin(vma);
667 
668 			if (err)
669 				goto out_unpin;
670 
671 			if (igt_timeout(end_time,
672 					"%s timed out at offset %x with page-size %x\n",
673 					__func__, offset, page_size))
674 				break;
675 		}
676 
677 		i915_gem_object_lock(obj, NULL);
678 		i915_gem_object_unpin_pages(obj);
679 		__i915_gem_object_put_pages(obj);
680 		i915_gem_object_unlock(obj);
681 		i915_gem_object_put(obj);
682 	}
683 
684 	return 0;
685 
686 out_unpin:
687 	i915_gem_object_lock(obj, NULL);
688 	i915_gem_object_unpin_pages(obj);
689 	i915_gem_object_unlock(obj);
690 out_put:
691 	i915_gem_object_put(obj);
692 
693 	return err;
694 }
695 
696 static void close_object_list(struct list_head *objects,
697 			      struct i915_ppgtt *ppgtt)
698 {
699 	struct drm_i915_gem_object *obj, *on;
700 
701 	list_for_each_entry_safe(obj, on, objects, st_link) {
702 		list_del(&obj->st_link);
703 		i915_gem_object_lock(obj, NULL);
704 		i915_gem_object_unpin_pages(obj);
705 		__i915_gem_object_put_pages(obj);
706 		i915_gem_object_unlock(obj);
707 		i915_gem_object_put(obj);
708 	}
709 }
710 
711 static int igt_mock_ppgtt_huge_fill(void *arg)
712 {
713 	struct i915_ppgtt *ppgtt = arg;
714 	struct drm_i915_private *i915 = ppgtt->vm.i915;
715 	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
716 	unsigned long page_num;
717 	bool single = false;
718 	LIST_HEAD(objects);
719 	IGT_TIMEOUT(end_time);
720 	int err = -ENODEV;
721 
722 	for_each_prime_number_from(page_num, 1, max_pages) {
723 		struct drm_i915_gem_object *obj;
724 		u64 size = page_num << PAGE_SHIFT;
725 		struct i915_vma *vma;
726 		unsigned int expected_gtt = 0;
727 		int i;
728 
729 		obj = fake_huge_pages_object(i915, size, single);
730 		if (IS_ERR(obj)) {
731 			err = PTR_ERR(obj);
732 			break;
733 		}
734 
735 		if (obj->base.size != size) {
736 			pr_err("obj->base.size=%zd, expected=%llu\n",
737 			       obj->base.size, size);
738 			i915_gem_object_put(obj);
739 			err = -EINVAL;
740 			break;
741 		}
742 
743 		err = i915_gem_object_pin_pages_unlocked(obj);
744 		if (err) {
745 			i915_gem_object_put(obj);
746 			break;
747 		}
748 
749 		list_add(&obj->st_link, &objects);
750 
751 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
752 		if (IS_ERR(vma)) {
753 			err = PTR_ERR(vma);
754 			break;
755 		}
756 
757 		err = i915_vma_pin(vma, 0, 0, PIN_USER);
758 		if (err)
759 			break;
760 
761 		err = igt_check_page_sizes(vma);
762 		if (err) {
763 			i915_vma_unpin(vma);
764 			break;
765 		}
766 
767 		/*
768 		 * Figure out the expected gtt page size knowing that we go from
769 		 * largest to smallest page size sg chunks, and that we align to
770 		 * the largest page size.
771 		 */
772 		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
773 			unsigned int page_size = page_sizes[i];
774 
775 			if (HAS_PAGE_SIZES(i915, page_size) &&
776 			    size >= page_size) {
777 				expected_gtt |= page_size;
778 				size &= page_size-1;
779 			}
780 		}
781 
782 		GEM_BUG_ON(!expected_gtt);
783 		GEM_BUG_ON(size);
784 
785 		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
786 			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
787 
788 		i915_vma_unpin(vma);
789 
790 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
791 			if (!IS_ALIGNED(vma->node.start,
792 					I915_GTT_PAGE_SIZE_2M)) {
793 				pr_err("node.start(%llx) not aligned to 2M\n",
794 				       vma->node.start);
795 				err = -EINVAL;
796 				break;
797 			}
798 
799 			if (!IS_ALIGNED(vma->node.size,
800 					I915_GTT_PAGE_SIZE_2M)) {
801 				pr_err("node.size(%llx) not aligned to 2M\n",
802 				       vma->node.size);
803 				err = -EINVAL;
804 				break;
805 			}
806 		}
807 
808 		if (vma->page_sizes.gtt != expected_gtt) {
809 			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
810 			       vma->page_sizes.gtt, expected_gtt,
811 			       obj->base.size, yesno(!!single));
812 			err = -EINVAL;
813 			break;
814 		}
815 
816 		if (igt_timeout(end_time,
817 				"%s timed out at size %zd\n",
818 				__func__, obj->base.size))
819 			break;
820 
821 		single = !single;
822 	}
823 
824 	close_object_list(&objects, ppgtt);
825 
826 	if (err == -ENOMEM || err == -ENOSPC)
827 		err = 0;
828 
829 	return err;
830 }
831 
832 static int igt_mock_ppgtt_64K(void *arg)
833 {
834 	struct i915_ppgtt *ppgtt = arg;
835 	struct drm_i915_private *i915 = ppgtt->vm.i915;
836 	struct drm_i915_gem_object *obj;
837 	const struct object_info {
838 		unsigned int size;
839 		unsigned int gtt;
840 		unsigned int offset;
841 	} objects[] = {
842 		/* Cases with forced padding/alignment */
843 		{
844 			.size = SZ_64K,
845 			.gtt = I915_GTT_PAGE_SIZE_64K,
846 			.offset = 0,
847 		},
848 		{
849 			.size = SZ_64K + SZ_4K,
850 			.gtt = I915_GTT_PAGE_SIZE_4K,
851 			.offset = 0,
852 		},
853 		{
854 			.size = SZ_64K - SZ_4K,
855 			.gtt = I915_GTT_PAGE_SIZE_4K,
856 			.offset = 0,
857 		},
858 		{
859 			.size = SZ_2M,
860 			.gtt = I915_GTT_PAGE_SIZE_64K,
861 			.offset = 0,
862 		},
863 		{
864 			.size = SZ_2M - SZ_4K,
865 			.gtt = I915_GTT_PAGE_SIZE_4K,
866 			.offset = 0,
867 		},
868 		{
869 			.size = SZ_2M + SZ_4K,
870 			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
871 			.offset = 0,
872 		},
873 		{
874 			.size = SZ_2M + SZ_64K,
875 			.gtt = I915_GTT_PAGE_SIZE_64K,
876 			.offset = 0,
877 		},
878 		{
879 			.size = SZ_2M - SZ_64K,
880 			.gtt = I915_GTT_PAGE_SIZE_64K,
881 			.offset = 0,
882 		},
883 		/* Try without any forced padding/alignment */
884 		{
885 			.size = SZ_64K,
886 			.offset = SZ_2M,
887 			.gtt = I915_GTT_PAGE_SIZE_4K,
888 		},
889 		{
890 			.size = SZ_128K,
891 			.offset = SZ_2M - SZ_64K,
892 			.gtt = I915_GTT_PAGE_SIZE_4K,
893 		},
894 	};
895 	struct i915_vma *vma;
896 	int i, single;
897 	int err;
898 
899 	/*
900 	 * Sanity check some of the trickiness with 64K pages -- either we can
901 	 * safely mark the whole page-table(2M block) as 64K, or we have to
902 	 * always fallback to 4K.
903 	 */
904 
905 	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
906 		return 0;
907 
908 	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
909 		unsigned int size = objects[i].size;
910 		unsigned int expected_gtt = objects[i].gtt;
911 		unsigned int offset = objects[i].offset;
912 		unsigned int flags = PIN_USER;
913 
914 		for (single = 0; single <= 1; single++) {
915 			obj = fake_huge_pages_object(i915, size, !!single);
916 			if (IS_ERR(obj))
917 				return PTR_ERR(obj);
918 
919 			err = i915_gem_object_pin_pages_unlocked(obj);
920 			if (err)
921 				goto out_object_put;
922 
923 			/*
924 			 * Disable 2M pages -- We only want to use 64K/4K pages
925 			 * for this test.
926 			 */
927 			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
928 
929 			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
930 			if (IS_ERR(vma)) {
931 				err = PTR_ERR(vma);
932 				goto out_object_unpin;
933 			}
934 
935 			if (offset)
936 				flags |= PIN_OFFSET_FIXED | offset;
937 
938 			err = i915_vma_pin(vma, 0, 0, flags);
939 			if (err)
940 				goto out_object_unpin;
941 
942 			err = igt_check_page_sizes(vma);
943 			if (err)
944 				goto out_vma_unpin;
945 
946 			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
947 				if (!IS_ALIGNED(vma->node.start,
948 						I915_GTT_PAGE_SIZE_2M)) {
949 					pr_err("node.start(%llx) not aligned to 2M\n",
950 					       vma->node.start);
951 					err = -EINVAL;
952 					goto out_vma_unpin;
953 				}
954 
955 				if (!IS_ALIGNED(vma->node.size,
956 						I915_GTT_PAGE_SIZE_2M)) {
957 					pr_err("node.size(%llx) not aligned to 2M\n",
958 					       vma->node.size);
959 					err = -EINVAL;
960 					goto out_vma_unpin;
961 				}
962 			}
963 
964 			if (vma->page_sizes.gtt != expected_gtt) {
965 				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
966 				       vma->page_sizes.gtt, expected_gtt, i,
967 				       yesno(!!single));
968 				err = -EINVAL;
969 				goto out_vma_unpin;
970 			}
971 
972 			i915_vma_unpin(vma);
973 			i915_gem_object_lock(obj, NULL);
974 			i915_gem_object_unpin_pages(obj);
975 			__i915_gem_object_put_pages(obj);
976 			i915_gem_object_unlock(obj);
977 			i915_gem_object_put(obj);
978 
979 			i915_gem_drain_freed_objects(i915);
980 		}
981 	}
982 
983 	return 0;
984 
985 out_vma_unpin:
986 	i915_vma_unpin(vma);
987 out_object_unpin:
988 	i915_gem_object_lock(obj, NULL);
989 	i915_gem_object_unpin_pages(obj);
990 	i915_gem_object_unlock(obj);
991 out_object_put:
992 	i915_gem_object_put(obj);
993 
994 	return err;
995 }
996 
997 static int gpu_write(struct intel_context *ce,
998 		     struct i915_vma *vma,
999 		     u32 dw,
1000 		     u32 val)
1001 {
1002 	int err;
1003 
1004 	i915_gem_object_lock(vma->obj, NULL);
1005 	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1006 	i915_gem_object_unlock(vma->obj);
1007 	if (err)
1008 		return err;
1009 
1010 	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
1011 			       vma->size >> PAGE_SHIFT, val);
1012 }
1013 
1014 static int
1015 __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1016 {
1017 	unsigned int needs_flush;
1018 	unsigned long n;
1019 	int err;
1020 
1021 	i915_gem_object_lock(obj, NULL);
1022 	err = i915_gem_object_prepare_read(obj, &needs_flush);
1023 	if (err)
1024 		goto err_unlock;
1025 
1026 	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
1027 		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
1028 
1029 		if (needs_flush & CLFLUSH_BEFORE)
1030 			drm_clflush_virt_range(ptr, PAGE_SIZE);
1031 
1032 		if (ptr[dword] != val) {
1033 			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
1034 			       n, dword, ptr[dword], val);
1035 			kunmap_atomic(ptr);
1036 			err = -EINVAL;
1037 			break;
1038 		}
1039 
1040 		kunmap_atomic(ptr);
1041 	}
1042 
1043 	i915_gem_object_finish_access(obj);
1044 err_unlock:
1045 	i915_gem_object_unlock(obj);
1046 
1047 	return err;
1048 }
1049 
1050 static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1051 {
1052 	unsigned long n = obj->base.size >> PAGE_SHIFT;
1053 	u32 *ptr;
1054 	int err;
1055 
1056 	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
1057 	if (err)
1058 		return err;
1059 
1060 	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1061 	if (IS_ERR(ptr))
1062 		return PTR_ERR(ptr);
1063 
1064 	ptr += dword;
1065 	while (n--) {
1066 		if (*ptr != val) {
1067 			pr_err("base[%u]=%08x, val=%08x\n",
1068 			       dword, *ptr, val);
1069 			err = -EINVAL;
1070 			break;
1071 		}
1072 
1073 		ptr += PAGE_SIZE / sizeof(*ptr);
1074 	}
1075 
1076 	i915_gem_object_unpin_map(obj);
1077 	return err;
1078 }
1079 
1080 static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1081 {
1082 	if (i915_gem_object_has_struct_page(obj))
1083 		return __cpu_check_shmem(obj, dword, val);
1084 	else
1085 		return __cpu_check_vmap(obj, dword, val);
1086 }
1087 
1088 static int __igt_write_huge(struct intel_context *ce,
1089 			    struct drm_i915_gem_object *obj,
1090 			    u64 size, u64 offset,
1091 			    u32 dword, u32 val)
1092 {
1093 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1094 	struct i915_vma *vma;
1095 	int err;
1096 
1097 	vma = i915_vma_instance(obj, ce->vm, NULL);
1098 	if (IS_ERR(vma))
1099 		return PTR_ERR(vma);
1100 
1101 	err = i915_vma_pin(vma, size, 0, flags | offset);
1102 	if (err) {
1103 		/*
1104 		 * The ggtt may have some pages reserved so
1105 		 * refrain from erroring out.
1106 		 */
1107 		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
1108 			err = 0;
1109 
1110 		return err;
1111 	}
1112 
1113 	err = igt_check_page_sizes(vma);
1114 	if (err)
1115 		goto out_vma_unpin;
1116 
1117 	err = gpu_write(ce, vma, dword, val);
1118 	if (err) {
1119 		pr_err("gpu-write failed at offset=%llx\n", offset);
1120 		goto out_vma_unpin;
1121 	}
1122 
1123 	err = cpu_check(obj, dword, val);
1124 	if (err) {
1125 		pr_err("cpu-check failed at offset=%llx\n", offset);
1126 		goto out_vma_unpin;
1127 	}
1128 
1129 out_vma_unpin:
1130 	i915_vma_unpin(vma);
1131 	return err;
1132 }
1133 
1134 static int igt_write_huge(struct drm_i915_private *i915,
1135 			  struct drm_i915_gem_object *obj)
1136 {
1137 	struct i915_gem_engines *engines;
1138 	struct i915_gem_engines_iter it;
1139 	struct intel_context *ce;
1140 	I915_RND_STATE(prng);
1141 	IGT_TIMEOUT(end_time);
1142 	unsigned int max_page_size;
1143 	unsigned int count;
1144 	struct i915_gem_context *ctx;
1145 	struct file *file;
1146 	u64 max;
1147 	u64 num;
1148 	u64 size;
1149 	int *order;
1150 	int i, n;
1151 	int err = 0;
1152 
1153 	file = mock_file(i915);
1154 	if (IS_ERR(file))
1155 		return PTR_ERR(file);
1156 
1157 	ctx = hugepage_ctx(i915, file);
1158 	if (IS_ERR(ctx)) {
1159 		err = PTR_ERR(ctx);
1160 		goto out;
1161 	}
1162 
1163 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1164 
1165 	size = obj->base.size;
1166 	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1167 		size = round_up(size, I915_GTT_PAGE_SIZE_2M);
1168 
1169 	n = 0;
1170 	count = 0;
1171 	max = U64_MAX;
1172 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1173 		count++;
1174 		if (!intel_engine_can_store_dword(ce->engine))
1175 			continue;
1176 
1177 		max = min(max, ce->vm->total);
1178 		n++;
1179 	}
1180 	i915_gem_context_unlock_engines(ctx);
1181 	if (!n)
1182 		goto out;
1183 
1184 	/*
1185 	 * To keep things interesting when alternating between engines in our
1186 	 * randomized order, lets also make feeding to the same engine a few
1187 	 * times in succession a possibility by enlarging the permutation array.
1188 	 */
1189 	order = i915_random_order(count * count, &prng);
1190 	if (!order)
1191 		return -ENOMEM;
1192 
1193 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
1194 	max = div_u64(max - size, max_page_size);
1195 
1196 	/*
1197 	 * Try various offsets in an ascending/descending fashion until we
1198 	 * timeout -- we want to avoid issues hidden by effectively always using
1199 	 * offset = 0.
1200 	 */
1201 	i = 0;
1202 	engines = i915_gem_context_lock_engines(ctx);
1203 	for_each_prime_number_from(num, 0, max) {
1204 		u64 offset_low = num * max_page_size;
1205 		u64 offset_high = (max - num) * max_page_size;
1206 		u32 dword = offset_in_page(num) / 4;
1207 		struct intel_context *ce;
1208 
1209 		ce = engines->engines[order[i] % engines->num_engines];
1210 		i = (i + 1) % (count * count);
1211 		if (!ce || !intel_engine_can_store_dword(ce->engine))
1212 			continue;
1213 
1214 		/*
1215 		 * In order to utilize 64K pages we need to both pad the vma
1216 		 * size and ensure the vma offset is at the start of the pt
1217 		 * boundary, however to improve coverage we opt for testing both
1218 		 * aligned and unaligned offsets.
1219 		 */
1220 		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1221 			offset_low = round_down(offset_low,
1222 						I915_GTT_PAGE_SIZE_2M);
1223 
1224 		err = __igt_write_huge(ce, obj, size, offset_low,
1225 				       dword, num + 1);
1226 		if (err)
1227 			break;
1228 
1229 		err = __igt_write_huge(ce, obj, size, offset_high,
1230 				       dword, num + 1);
1231 		if (err)
1232 			break;
1233 
1234 		if (igt_timeout(end_time,
1235 				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
1236 				__func__, ce->engine->name, offset_low, offset_high,
1237 				max_page_size))
1238 			break;
1239 	}
1240 	i915_gem_context_unlock_engines(ctx);
1241 
1242 	kfree(order);
1243 
1244 out:
1245 	fput(file);
1246 	return err;
1247 }
1248 
1249 typedef struct drm_i915_gem_object *
1250 (*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
1251 
1252 static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
1253 {
1254 	return i915->mm.gemfs && has_transparent_hugepage();
1255 }
1256 
1257 static struct drm_i915_gem_object *
1258 igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
1259 {
1260 	if (!igt_can_allocate_thp(i915)) {
1261 		pr_info("%s missing THP support, skipping\n", __func__);
1262 		return ERR_PTR(-ENODEV);
1263 	}
1264 
1265 	return i915_gem_object_create_shmem(i915, size);
1266 }
1267 
1268 static struct drm_i915_gem_object *
1269 igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
1270 {
1271 	return i915_gem_object_create_internal(i915, size);
1272 }
1273 
1274 static struct drm_i915_gem_object *
1275 igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
1276 {
1277 	return huge_pages_object(i915, size, size);
1278 }
1279 
1280 static struct drm_i915_gem_object *
1281 igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
1282 {
1283 	return i915_gem_object_create_lmem(i915, size, flags);
1284 }
1285 
1286 static u32 igt_random_size(struct rnd_state *prng,
1287 			   u32 min_page_size,
1288 			   u32 max_page_size)
1289 {
1290 	u64 mask;
1291 	u32 size;
1292 
1293 	GEM_BUG_ON(!is_power_of_2(min_page_size));
1294 	GEM_BUG_ON(!is_power_of_2(max_page_size));
1295 	GEM_BUG_ON(min_page_size < PAGE_SIZE);
1296 	GEM_BUG_ON(min_page_size > max_page_size);
1297 
1298 	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
1299 	size = prandom_u32_state(prng) & mask;
1300 	if (size < min_page_size)
1301 		size |= min_page_size;
1302 
1303 	return size;
1304 }
1305 
1306 static int igt_ppgtt_smoke_huge(void *arg)
1307 {
1308 	struct drm_i915_private *i915 = arg;
1309 	struct drm_i915_gem_object *obj;
1310 	I915_RND_STATE(prng);
1311 	struct {
1312 		igt_create_fn fn;
1313 		u32 min;
1314 		u32 max;
1315 	} backends[] = {
1316 		{ igt_create_internal, SZ_64K, SZ_2M,  },
1317 		{ igt_create_shmem,    SZ_64K, SZ_32M, },
1318 		{ igt_create_local,    SZ_64K, SZ_1G,  },
1319 	};
1320 	int err;
1321 	int i;
1322 
1323 	/*
1324 	 * Sanity check that the HW uses huge pages correctly through our
1325 	 * various backends -- ensure that our writes land in the right place.
1326 	 */
1327 
1328 	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
1329 		u32 min = backends[i].min;
1330 		u32 max = backends[i].max;
1331 		u32 size = max;
1332 
1333 try_again:
1334 		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
1335 
1336 		obj = backends[i].fn(i915, size, 0);
1337 		if (IS_ERR(obj)) {
1338 			err = PTR_ERR(obj);
1339 			if (err == -E2BIG) {
1340 				size >>= 1;
1341 				goto try_again;
1342 			} else if (err == -ENODEV) {
1343 				err = 0;
1344 				continue;
1345 			}
1346 
1347 			return err;
1348 		}
1349 
1350 		err = i915_gem_object_pin_pages_unlocked(obj);
1351 		if (err) {
1352 			if (err == -ENXIO || err == -E2BIG) {
1353 				i915_gem_object_put(obj);
1354 				size >>= 1;
1355 				goto try_again;
1356 			}
1357 			goto out_put;
1358 		}
1359 
1360 		if (obj->mm.page_sizes.phys < min) {
1361 			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
1362 				__func__, size, i);
1363 			err = -ENOMEM;
1364 			goto out_unpin;
1365 		}
1366 
1367 		err = igt_write_huge(i915, obj);
1368 		if (err) {
1369 			pr_err("%s write-huge failed with size=%u, i=%d\n",
1370 			       __func__, size, i);
1371 		}
1372 out_unpin:
1373 		i915_gem_object_lock(obj, NULL);
1374 		i915_gem_object_unpin_pages(obj);
1375 		__i915_gem_object_put_pages(obj);
1376 		i915_gem_object_unlock(obj);
1377 out_put:
1378 		i915_gem_object_put(obj);
1379 
1380 		if (err == -ENOMEM || err == -ENXIO)
1381 			err = 0;
1382 
1383 		if (err)
1384 			break;
1385 
1386 		cond_resched();
1387 	}
1388 
1389 	return err;
1390 }
1391 
1392 static int igt_ppgtt_sanity_check(void *arg)
1393 {
1394 	struct drm_i915_private *i915 = arg;
1395 	unsigned int supported = INTEL_INFO(i915)->page_sizes;
1396 	struct {
1397 		igt_create_fn fn;
1398 		unsigned int flags;
1399 	} backends[] = {
1400 		{ igt_create_system, 0,                        },
1401 		{ igt_create_local,  0,                        },
1402 		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
1403 	};
1404 	struct {
1405 		u32 size;
1406 		u32 pages;
1407 	} combos[] = {
1408 		{ SZ_64K,		SZ_64K		},
1409 		{ SZ_2M,		SZ_2M		},
1410 		{ SZ_2M,		SZ_64K		},
1411 		{ SZ_2M - SZ_64K,	SZ_64K		},
1412 		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
1413 		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
1414 		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
1415 		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
1416 	};
1417 	int i, j;
1418 	int err;
1419 
1420 	if (supported == I915_GTT_PAGE_SIZE_4K)
1421 		return 0;
1422 
1423 	/*
1424 	 * Sanity check that the HW behaves with a limited set of combinations.
1425 	 * We already have a bunch of randomised testing, which should give us
1426 	 * a decent amount of variation between runs, however we should keep
1427 	 * this to limit the chances of introducing a temporary regression, by
1428 	 * testing the most obvious cases that might make something blow up.
1429 	 */
1430 
1431 	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
1432 		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
1433 			struct drm_i915_gem_object *obj;
1434 			u32 size = combos[j].size;
1435 			u32 pages = combos[j].pages;
1436 
1437 			obj = backends[i].fn(i915, size, backends[i].flags);
1438 			if (IS_ERR(obj)) {
1439 				err = PTR_ERR(obj);
1440 				if (err == -ENODEV) {
1441 					pr_info("Device lacks local memory, skipping\n");
1442 					err = 0;
1443 					break;
1444 				}
1445 
1446 				return err;
1447 			}
1448 
1449 			err = i915_gem_object_pin_pages_unlocked(obj);
1450 			if (err) {
1451 				i915_gem_object_put(obj);
1452 				goto out;
1453 			}
1454 
1455 			GEM_BUG_ON(pages > obj->base.size);
1456 			pages = pages & supported;
1457 
1458 			if (pages)
1459 				obj->mm.page_sizes.sg = pages;
1460 
1461 			err = igt_write_huge(i915, obj);
1462 
1463 			i915_gem_object_lock(obj, NULL);
1464 			i915_gem_object_unpin_pages(obj);
1465 			__i915_gem_object_put_pages(obj);
1466 			i915_gem_object_unlock(obj);
1467 			i915_gem_object_put(obj);
1468 
1469 			if (err) {
1470 				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
1471 				       __func__, size, pages, i, j);
1472 				goto out;
1473 			}
1474 		}
1475 
1476 		cond_resched();
1477 	}
1478 
1479 out:
1480 	if (err == -ENOMEM)
1481 		err = 0;
1482 
1483 	return err;
1484 }
1485 
1486 static int igt_tmpfs_fallback(void *arg)
1487 {
1488 	struct drm_i915_private *i915 = arg;
1489 	struct i915_address_space *vm;
1490 	struct i915_gem_context *ctx;
1491 	struct vfsmount *gemfs = i915->mm.gemfs;
1492 	struct drm_i915_gem_object *obj;
1493 	struct i915_vma *vma;
1494 	struct file *file;
1495 	u32 *vaddr;
1496 	int err = 0;
1497 
1498 	file = mock_file(i915);
1499 	if (IS_ERR(file))
1500 		return PTR_ERR(file);
1501 
1502 	ctx = hugepage_ctx(i915, file);
1503 	if (IS_ERR(ctx)) {
1504 		err = PTR_ERR(ctx);
1505 		goto out;
1506 	}
1507 	vm = i915_gem_context_get_eb_vm(ctx);
1508 
1509 	/*
1510 	 * Make sure that we don't burst into a ball of flames upon falling back
1511 	 * to tmpfs, which we rely on if on the off-chance we encouter a failure
1512 	 * when setting up gemfs.
1513 	 */
1514 
1515 	i915->mm.gemfs = NULL;
1516 
1517 	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
1518 	if (IS_ERR(obj)) {
1519 		err = PTR_ERR(obj);
1520 		goto out_restore;
1521 	}
1522 
1523 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
1524 	if (IS_ERR(vaddr)) {
1525 		err = PTR_ERR(vaddr);
1526 		goto out_put;
1527 	}
1528 	*vaddr = 0xdeadbeaf;
1529 
1530 	__i915_gem_object_flush_map(obj, 0, 64);
1531 	i915_gem_object_unpin_map(obj);
1532 
1533 	vma = i915_vma_instance(obj, vm, NULL);
1534 	if (IS_ERR(vma)) {
1535 		err = PTR_ERR(vma);
1536 		goto out_put;
1537 	}
1538 
1539 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
1540 	if (err)
1541 		goto out_put;
1542 
1543 	err = igt_check_page_sizes(vma);
1544 
1545 	i915_vma_unpin(vma);
1546 out_put:
1547 	i915_gem_object_put(obj);
1548 out_restore:
1549 	i915->mm.gemfs = gemfs;
1550 
1551 	i915_vm_put(vm);
1552 out:
1553 	fput(file);
1554 	return err;
1555 }
1556 
1557 static int igt_shrink_thp(void *arg)
1558 {
1559 	struct drm_i915_private *i915 = arg;
1560 	struct i915_address_space *vm;
1561 	struct i915_gem_context *ctx;
1562 	struct drm_i915_gem_object *obj;
1563 	struct i915_gem_engines_iter it;
1564 	struct intel_context *ce;
1565 	struct i915_vma *vma;
1566 	struct file *file;
1567 	unsigned int flags = PIN_USER;
1568 	unsigned int n;
1569 	bool should_swap;
1570 	int err;
1571 
1572 	if (!igt_can_allocate_thp(i915)) {
1573 		pr_info("missing THP support, skipping\n");
1574 		return 0;
1575 	}
1576 
1577 	file = mock_file(i915);
1578 	if (IS_ERR(file))
1579 		return PTR_ERR(file);
1580 
1581 	ctx = hugepage_ctx(i915, file);
1582 	if (IS_ERR(ctx)) {
1583 		err = PTR_ERR(ctx);
1584 		goto out;
1585 	}
1586 	vm = i915_gem_context_get_eb_vm(ctx);
1587 
1588 	/*
1589 	 * Sanity check shrinking huge-paged object -- make sure nothing blows
1590 	 * up.
1591 	 */
1592 
1593 	obj = i915_gem_object_create_shmem(i915, SZ_2M);
1594 	if (IS_ERR(obj)) {
1595 		err = PTR_ERR(obj);
1596 		goto out_vm;
1597 	}
1598 
1599 	vma = i915_vma_instance(obj, vm, NULL);
1600 	if (IS_ERR(vma)) {
1601 		err = PTR_ERR(vma);
1602 		goto out_put;
1603 	}
1604 
1605 	err = i915_vma_pin(vma, 0, 0, flags);
1606 	if (err)
1607 		goto out_put;
1608 
1609 	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
1610 		pr_info("failed to allocate THP, finishing test early\n");
1611 		goto out_unpin;
1612 	}
1613 
1614 	err = igt_check_page_sizes(vma);
1615 	if (err)
1616 		goto out_unpin;
1617 
1618 	n = 0;
1619 
1620 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1621 		if (!intel_engine_can_store_dword(ce->engine))
1622 			continue;
1623 
1624 		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
1625 		if (err)
1626 			break;
1627 	}
1628 	i915_gem_context_unlock_engines(ctx);
1629 	/*
1630 	 * Nuke everything *before* we unpin the pages so we can be reasonably
1631 	 * sure that when later checking get_nr_swap_pages() that some random
1632 	 * leftover object doesn't steal the remaining swap space.
1633 	 */
1634 	i915_gem_shrink(NULL, i915, -1UL, NULL,
1635 			I915_SHRINK_BOUND |
1636 			I915_SHRINK_UNBOUND |
1637 			I915_SHRINK_ACTIVE);
1638 	i915_vma_unpin(vma);
1639 	if (err)
1640 		goto out_put;
1641 
1642 	/*
1643 	 * Now that the pages are *unpinned* shrinking should invoke
1644 	 * shmem to truncate our pages, if we have available swap.
1645 	 */
1646 	should_swap = get_nr_swap_pages() > 0;
1647 	i915_gem_shrink(NULL, i915, -1UL, NULL,
1648 			I915_SHRINK_BOUND |
1649 			I915_SHRINK_UNBOUND |
1650 			I915_SHRINK_ACTIVE |
1651 			I915_SHRINK_WRITEBACK);
1652 	if (should_swap == i915_gem_object_has_pages(obj)) {
1653 		pr_err("unexpected pages mismatch, should_swap=%s\n",
1654 		       yesno(should_swap));
1655 		err = -EINVAL;
1656 		goto out_put;
1657 	}
1658 
1659 	if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
1660 		pr_err("unexpected residual page-size bits, should_swap=%s\n",
1661 		       yesno(should_swap));
1662 		err = -EINVAL;
1663 		goto out_put;
1664 	}
1665 
1666 	err = i915_vma_pin(vma, 0, 0, flags);
1667 	if (err)
1668 		goto out_put;
1669 
1670 	while (n--) {
1671 		err = cpu_check(obj, n, 0xdeadbeaf);
1672 		if (err)
1673 			break;
1674 	}
1675 
1676 out_unpin:
1677 	i915_vma_unpin(vma);
1678 out_put:
1679 	i915_gem_object_put(obj);
1680 out_vm:
1681 	i915_vm_put(vm);
1682 out:
1683 	fput(file);
1684 	return err;
1685 }
1686 
1687 int i915_gem_huge_page_mock_selftests(void)
1688 {
1689 	static const struct i915_subtest tests[] = {
1690 		SUBTEST(igt_mock_exhaust_device_supported_pages),
1691 		SUBTEST(igt_mock_memory_region_huge_pages),
1692 		SUBTEST(igt_mock_ppgtt_misaligned_dma),
1693 		SUBTEST(igt_mock_ppgtt_huge_fill),
1694 		SUBTEST(igt_mock_ppgtt_64K),
1695 	};
1696 	struct drm_i915_private *dev_priv;
1697 	struct i915_ppgtt *ppgtt;
1698 	int err;
1699 
1700 	dev_priv = mock_gem_device();
1701 	if (!dev_priv)
1702 		return -ENOMEM;
1703 
1704 	/* Pretend to be a device which supports the 48b PPGTT */
1705 	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
1706 	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
1707 
1708 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1709 	if (IS_ERR(ppgtt)) {
1710 		err = PTR_ERR(ppgtt);
1711 		goto out_unlock;
1712 	}
1713 
1714 	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
1715 		pr_err("failed to create 48b PPGTT\n");
1716 		err = -EINVAL;
1717 		goto out_put;
1718 	}
1719 
1720 	/* If we were ever hit this then it's time to mock the 64K scratch */
1721 	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
1722 		pr_err("PPGTT missing 64K scratch page\n");
1723 		err = -EINVAL;
1724 		goto out_put;
1725 	}
1726 
1727 	err = i915_subtests(tests, ppgtt);
1728 
1729 out_put:
1730 	i915_vm_put(&ppgtt->vm);
1731 out_unlock:
1732 	mock_destroy_device(dev_priv);
1733 	return err;
1734 }
1735 
1736 int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
1737 {
1738 	static const struct i915_subtest tests[] = {
1739 		SUBTEST(igt_shrink_thp),
1740 		SUBTEST(igt_tmpfs_fallback),
1741 		SUBTEST(igt_ppgtt_smoke_huge),
1742 		SUBTEST(igt_ppgtt_sanity_check),
1743 	};
1744 
1745 	if (!HAS_PPGTT(i915)) {
1746 		pr_info("PPGTT not supported, skipping live-selftests\n");
1747 		return 0;
1748 	}
1749 
1750 	if (intel_gt_is_wedged(to_gt(i915)))
1751 		return 0;
1752 
1753 	return i915_live_subtests(tests, i915);
1754 }
1755