/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

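/*
 * Return the largest GTT page size supported by the device that still fits
 * within @rem bytes; this relies on page_sizes[] above being ordered from
 * largest to smallest.
 */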
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
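	/*
	 * The outer loop walks page_mask from its largest set bit downwards;
	 * the inner loop allocates pages of that size while keeping enough of
	 * the remainder in reserve so that each smaller page size still left
	 * in the mask can be used at least once.
	 */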
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
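	/*
	 * Note: these fake objects carry no real backing pages; sg_dma_address
	 * is simply set to the chosen page size so that the fake dma address
	 * is always suitably aligned for that page size.
	 */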
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_close;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_close;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_vma_close(vma);

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (!IS_ERR(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

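		/*
		 * A single page-table cannot mix 64K and 4K entries, and here
		 * any 4K tail shares its page-table with the 64K chunk, so the
		 * whole mapping is expected to fall back to 4K.
		 */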
		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_vma_close;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_close(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

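/*
 * Use the GPU to write @val into dword index @dw of every page spanned by
 * @vma (via igt_gpu_fill_dw()), after flushing the object into the GTT
 * domain.
 */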
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

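/*
 * CPU read-back of dword @dword in every page, either through the shmem
 * struct pages (with any required clflush) or through a WC vmap for objects
 * without struct pages.
 */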
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		goto out_vma_close;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		goto out_vma_close;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	__i915_vma_put(vma);
	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);
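	/* max now counts the max_page_size steps left once the object itself
	 * has been accounted for.
	 */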

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * time out -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary; however, to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

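	/*
	 * Pick a page-aligned size below twice max_page_size; if it lands
	 * below min_page_size, OR that bit in so we never return less than
	 * the minimum.
	 */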
	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_ppgtt_pin_update(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *dev_priv = ctx->i915;
	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	unsigned int n;
	int first, last;
	int err = 0;

	/*
	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
	 * past we had a subtle issue with being able to incorrectly do multiple
	 * alloc va ranges on the same object when doing a PIN_UPDATE, which
	 * resulted in some pretty nasty bugs, though only when using
	 * huge-gtt-pages.
	 */

	vm = i915_gem_context_get_vm_rcu(ctx);
	if (!i915_vm_is_4lvl(vm)) {
		pr_info("48b PPGTT not supported, skipping\n");
		goto out_vm;
	}

	first = ilog2(I915_GTT_PAGE_SIZE_64K);
	last = ilog2(I915_GTT_PAGE_SIZE_2M);

	for_each_set_bit_from(first, &supported, last + 1) {
		unsigned int page_size = BIT(first);

		obj = i915_gem_object_create_internal(dev_priv, page_size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}

		err = i915_vma_pin(vma, SZ_2M, 0, flags);
		if (err)
			goto out_close;

		if (vma->page_sizes.sg < page_size) {
			pr_info("Unable to allocate page-size %x, finishing test early\n",
				page_size);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);
		if (err)
			goto out_unpin;

		if (vma->page_sizes.gtt != page_size) {
			dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);

			/*
			 * The only valid reason for this to ever fail would be
			 * if the dma-mapper screwed us over when we did the
			 * dma_map_sg(), since it has the final say over the dma
			 * address.
			 */
			if (IS_ALIGNED(addr, page_size)) {
				pr_err("page_sizes.gtt=%u, expected=%u\n",
				       vma->page_sizes.gtt, page_size);
				err = -EINVAL;
			} else {
				pr_info("dma address misaligned, finishing test early\n");
			}

			goto out_unpin;
		}

		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
		if (err)
			goto out_unpin;

		i915_vma_unpin(vma);
		i915_vma_close(vma);

		i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	/*
	 * Make sure we don't end up with something like where the pde is still
	 * pointing to the 2M page, and the pt we just filled-in is dangling --
	 * we can check this by writing to the first page where it would then
	 * land in the now stale 2M page.
	 */

	n = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	if (err)
		goto out_unpin;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			goto out_unpin;
	}

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if, on the off-chance, we encounter a failure
	 * when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err = 0;

	/*
	 * Sanity check shrinking a huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_close;

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_close;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

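/*
 * The entry points below are invoked by the i915 selftest harness when the
 * driver is built with CONFIG_DRM_I915_SELFTEST, typically by loading the
 * module with the i915.mock_selftests / i915.live_selftests parameters set.
 */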
int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_close;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_subtests(tests, ppgtt);

out_close:
	i915_vm_put(&ppgtt->vm);

out_unlock:
	drm_dev_put(&dev_priv->drm);
	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);
	mutex_unlock(&ctx->mutex);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}