/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

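/*
 * Return the largest supported page size that still fits in @rem, or 0 if
 * none of the device's page sizes fit.
 */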
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

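/* Free every page referenced by the sg list, then the table itself. */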
static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

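/*
 * Allocate real backing pages for the object, using each page size set in
 * obj->mm.page_mask at least once (see the loop comment below).
 */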
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple, we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

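/*
 * Create an object whose backing store is built from exactly the mix of
 * page sizes given in @page_mask.
 */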
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

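/*
 * Mock get_pages(): no memory is really allocated, we only fill in the sg
 * lengths and dma lengths. The "dma address" is simply set to the chunk's
 * page size so that it is suitably aligned for the alignment checks.
 */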
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

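/* As above, but describe the whole object with a single sg entry. */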
static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

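/*
 * Create an object with no real backing store, using either the single- or
 * multi-entry mock get_pages() implementation above.
 */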
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

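/*
 * Common asserts for a bound vma: the tracked page sizes must be supported
 * by the device and consistent between the vma and its object.
 */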
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

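/*
 * Move the object to the GTT write domain and use the GPU to write @val
 * into it, one dword in each page, via igt_gpu_fill_dw().
 */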
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

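/*
 * CPU readback of the values written by the GPU, either through the
 * object's struct pages (shmem) or via a WC mapping; see cpu_check().
 */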
static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

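/*
 * Bind the object at @offset, write a test value with the GPU and read it
 * back from the CPU, checking the vma page sizes along the way.
 */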
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary, however to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

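/*
 * Pick a random page-aligned size which is at least @min_page_size and
 * strictly below twice @max_page_size.
 */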
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_ppgtt_pin_update(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *dev_priv = ctx->i915;
	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	unsigned int n;
	int first, last;
	int err = 0;

	/*
	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
	 * past we had a subtle issue with being able to incorrectly do multiple
	 * alloc va ranges on the same object when doing a PIN_UPDATE, which
	 * resulted in some pretty nasty bugs, though only when using
	 * huge-gtt-pages.
	 */

	vm = i915_gem_context_get_vm_rcu(ctx);
	if (!i915_vm_is_4lvl(vm)) {
		pr_info("48b PPGTT not supported, skipping\n");
		goto out_vm;
	}

	first = ilog2(I915_GTT_PAGE_SIZE_64K);
	last = ilog2(I915_GTT_PAGE_SIZE_2M);

	for_each_set_bit_from(first, &supported, last + 1) {
		unsigned int page_size = BIT(first);

		obj = i915_gem_object_create_internal(dev_priv, page_size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out_vm;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}

		err = i915_vma_pin(vma, SZ_2M, 0, flags);
		if (err)
			goto out_put;

		if (vma->page_sizes.sg < page_size) {
			pr_info("Unable to allocate page-size %x, finishing test early\n",
				page_size);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);
		if (err)
			goto out_unpin;

		if (vma->page_sizes.gtt != page_size) {
			dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);

			/*
			 * The only valid reason for this to ever fail would be
			 * if the dma-mapper screwed us over when we did the
			 * dma_map_sg(), since it has the final say over the dma
			 * address.
			 */
			if (IS_ALIGNED(addr, page_size)) {
				pr_err("page_sizes.gtt=%u, expected=%u\n",
				       vma->page_sizes.gtt, page_size);
				err = -EINVAL;
			} else {
				pr_info("dma address misaligned, finishing test early\n");
			}

			goto out_unpin;
		}

		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
		if (err)
			goto out_unpin;

		i915_vma_unpin(vma);
		i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	/*
	 * Make sure we don't end up in a situation where the pde is still
	 * pointing to the 2M page, and the pt we just filled in is dangling --
	 * we can check this by writing to the first page, where the write
	 * would then land in the now stale 2M page.
	 */

	n = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	if (err)
		goto out_unpin;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			goto out_unpin;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if, on the off-chance, we encounter a
	 * failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err = 0;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_put;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

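/*
 * Mock selftests: run against a mock device with a faked 48b PPGTT, so no
 * real hardware is required.
 */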
int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	drm_dev_put(&dev_priv->drm);
	return err;
}

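/*
 * Live selftests: run against real hardware through a fresh context, with
 * 64K scratch scrubbing enabled on the context's vm.
 */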
int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);
	mutex_unlock(&ctx->mutex);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}