/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

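/*
 * Return the largest device-supported GTT page size that still fits within
 * the remaining length @rem, or 0 if nothing fits.
 */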
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

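/*
 * get_pages backend for huge_page_ops: greedily allocate real, physically
 * contiguous chunks for the object, making sure every page size in the
 * object's page_mask is represented at least once.
 */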
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple, we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
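	/*
	 * For example, a 2M + 64K + 4K object with a page_mask of
	 * 2M | 64K | 4K is built as exactly one 2M chunk, one 64K chunk and
	 * one 4K chunk: the inner loop stops early so that enough of the
	 * remainder is left over for the smaller sizes still in the mask.
	 */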
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

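/*
 * Create a GEM object backed by real huge pages, where @page_mask selects
 * which page sizes must be used for the backing store.
 */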
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

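/*
 * Fake get_pages backend: no memory is allocated, we only populate the sg
 * lengths (and placeholder dma addresses) so the page-size tracking can be
 * exercised cheaply.
 */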
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

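/*
 * Create a GEM object with a fake backing store, laid out either as a
 * single sg entry or split into the largest supported page-size chunks.
 */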
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

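/*
 * Verify that the vma's page-size tracking is self-consistent: the sg and
 * gtt page sizes must be supported by the device and must agree with what
 * the backing object reports.
 */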
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	/*
	 * The dma-api is like a box of chocolates when it comes to the
	 * alignment of dma addresses, however for LMEM we have total control
	 * and so can guarantee alignment, likewise when we allocate our blocks
	 * they should appear in descending order, and if we know that we align
	 * to the largest page size for the GTT address, we should be able to
	 * assert that if we see 2M physical pages then we should also get 2M
	 * GTT pages. If we don't then something might be wrong in our
	 * construction of the backing pages.
	 *
	 * Maintaining alignment is required to utilise huge pages in the ppGTT.
	 */
	if (i915_gem_object_is_lmem(obj) &&
	    IS_ALIGNED(vma->node.start, SZ_2M) &&
	    vma->page_sizes.sg & SZ_2M &&
	    vma->page_sizes.gtt < SZ_2M) {
		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
		       vma->page_sizes.sg, vma->page_sizes.gtt);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K; /* Required for ppGTT */

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

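/*
 * For each supported page size, and for both default and contiguous
 * allocations, check that objects from a mock memory region have suitably
 * aligned dma addresses and the matching GTT page size.
 */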
static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}
}

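/*
 * Fill the ppGTT with objects of prime-numbered page counts, alternating
 * between single and multi chunk sg layouts, and verify the resulting GTT
 * page sizes along with the 2M alignment rules for 64K pages.
 */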
static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table(2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

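/*
 * Move the object to the GTT write domain and then use the GPU to write
 * @val into dword @dw of every page covered by the vma.
 */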
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

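/*
 * Bind the object at a fixed offset, sanity check its page sizes, then do a
 * GPU write followed by a CPU readback of the same dword.
 */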
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

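/*
 * Exercise GPU writes to the object over a randomised ordering of engines
 * and over ascending/descending offsets in the address space, until we run
 * out of offsets or hit the timeout.
 */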
static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary, however to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

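/*
 * Pick a random page-aligned size that is at least @min_page_size and less
 * than twice @max_page_size.
 */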
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if on the off-chance we encounter a failure
	 * when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err = 0;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_put;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

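/*
 * Mock selftests run against a mock device pretending to have a 48b ppGTT,
 * so no real hardware is required.
 */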
int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);
	return err;
}

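/*
 * Live selftests run against real hardware, using a throwaway file and
 * context, with 64K scratch scrubbing enabled on the context's VM.
 */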
int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);
	mutex_unlock(&ctx->mutex);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}