/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

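/*
 * Pick the largest supported page size that still fits in the remainder;
 * page_sizes[] is ordered 2M -> 64K -> 4K, so the first hit wins. For
 * example, rem = SZ_2M + SZ_4K gives 2M on the first call, and 4K once only
 * SZ_4K remains (assuming the device supports both sizes).
 */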
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: greedily fill the object from largest to
	 * smallest page-size, while ensuring that we use *every* page-size
	 * listed in the given page-mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

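		/*
		 * Keep allocating this page-size while we can still carve a
		 * page of each *smaller* size in the mask out of the
		 * remainder afterwards: (page_size - 1) & page_mask is
		 * exactly the sum of those smaller page-sizes, each mask bit
		 * being a distinct power of two.
		 */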
		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	obj->mm.madv = I915_MADV_DONTNEED;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

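/*
 * These ops back the object with real struct pages allocated above, hence
 * I915_GEM_OBJECT_HAS_STRUCT_PAGE, in contrast to the fake_ops further down
 * which provide dma entries only.
 */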
static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

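/*
 * Create an object which get_huge_pages() will fill with one run of each
 * page-size set in page_mask, largest first. As a purely illustrative
 * example (assuming the device supports all three page-sizes), the following
 * would back a single object with a 2M, a 64K and a 4K chunk:
 *
 *	obj = huge_pages_object(i915, SZ_2M + SZ_64K + SZ_4K,
 *				I915_GTT_PAGE_SIZE_2M |
 *				I915_GTT_PAGE_SIZE_64K |
 *				I915_GTT_PAGE_SIZE_4K);
 */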
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

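		/*
		 * There are no backing pages here; we stash the page-size as
		 * the fake dma address, which conveniently keeps each chunk
		 * aligned to its own page-size -- and the dma address is all
		 * the mock GTT insertion paths exercised by these tests look
		 * at.
		 */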
		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

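/*
 * Note the absence of I915_GEM_OBJECT_HAS_STRUCT_PAGE: fake objects carry
 * dma entries only, so while they can be inserted into a (mock) GTT they can
 * never be touched through the CPU.
 */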
static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single);
	else
		i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err = 0;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

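	/* Each value of i selects a non-empty subset of page_sizes[]. */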
	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_close;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_vma_close(vma);

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (!IS_ERR(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
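		/*
		 * For example, size = 2M + 64K + 4K yields expected_gtt =
		 * 2M | 64K | 4K here, and the 64K bit is then cleared below:
		 * the trailing 64K chunk ends up sharing a page table with
		 * the 4K remainder and so has to fall back to 4K entries
		 * (cf. igt_mock_ppgtt_64K() for the rules).
		 */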
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_vma_close;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_close(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

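/*
 * Have @engine store @val at dword index @dw in every page spanned by the
 * vma (via igt_gpu_fill_dw(), which emits one GPU store per page), after
 * first moving the object to the GTT domain; cpu_check() below reads it
 * back.
 */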
static int gpu_write(struct i915_vma *vma,
		     struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

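/*
 * Read back dword @dword of every page through the CPU domain and check it
 * against @val, flushing stale cachelines first where needed.
 */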
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);

	return err;
}

static int __igt_write_huge(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		goto out_vma_close;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(vm))
			err = 0;

		goto out_vma_close;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(vma, ctx, engine, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_destroy(vma);

	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int id;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64((vm->total - size), max_page_size);

	n = 0;
	for_each_engine(engine, i915, id) {
		if (!intel_engine_can_store_dword(engine)) {
			pr_info("store-dword-imm not supported on engine=%u\n",
				id);
			continue;
		}
		engines[n++] = engine;
	}

	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding the same engine a few
	 * times in succession a possibility by enlarging the permutation
	 * array.
	 */
	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
	if (!order)
		return -ENOMEM;

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * time out -- we want to avoid issues hidden by effectively always
	 * using offset = 0.
	 */
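	/*
	 * Note that each iteration writes val = num + 1 (never zero) at a
	 * dword index derived from num, so a write landing in the wrong
	 * place should be caught by the cpu_check() in __igt_write_huge()
	 * rather than silently aliasing an earlier write.
	 */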
	i = 0;
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;

		engine = engines[order[i] % n];
		i = (i + 1) % (n * I915_NUM_ENGINES);

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary; however, to improve coverage we opt for testing
		 * both aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, engine->id, offset_low, offset_high,
				max_page_size))
			break;
	}

	kfree(order);

	return err;
}

static int igt_ppgtt_exhaust_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	static unsigned int pages[ARRAY_SIZE(page_sizes)];
	struct drm_i915_gem_object *obj;
	unsigned int size_mask;
	unsigned int page_mask;
	int n, i;
	int err = -ENODEV;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check creating objects with a varying mix of page sizes --
	 * ensuring that our writes land in the right place.
	 */

	n = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
		pages[n++] = BIT(i);

	for (size_mask = 2; size_mask < BIT(n); size_mask++) {
		unsigned int size = 0;

		for (i = 0; i < n; i++) {
			if (size_mask & BIT(i))
				size |= pages[i];
		}

		/*
		 * For our page mask we want to enumerate all the page-size
		 * combinations which will fit into our chosen object size.
		 */
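		/*
		 * For example, with size = 2M + 4K the masks tried below are
		 * {4K, 64K} and {4K, 2M}; {64K} and {2M} alone fail the
		 * alignment check, since neither divides the size evenly.
		 */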
		for (page_mask = 2; page_mask <= size_mask; page_mask++) {
			unsigned int page_sizes = 0;

			for (i = 0; i < n; i++) {
				if (page_mask & BIT(i))
					page_sizes |= pages[i];
			}

			/*
			 * Ensure that we can actually fill the given object
			 * with our chosen page mask.
			 */
			if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
				continue;

			obj = huge_pages_object(i915, size, page_sizes);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);

				if (err == -ENOMEM) {
					pr_info("unable to get pages, size=%u, pages=%u\n",
						size, page_sizes);
					err = 0;
					break;
				}

				pr_err("pin_pages failed, size=%u, pages=%u\n",
				       size_mask, page_mask);

				goto out_device;
			}

			/* Force the page-size for the gtt insertion */
			obj->mm.page_sizes.sg = page_sizes;

			err = igt_write_huge(ctx, obj);
			if (err) {
				pr_err("exhaust write-huge failed with size=%u\n",
				       size);
				goto out_unpin;
			}

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	goto out_device;

out_unpin:
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = supported;

	return err;
}

static int igt_ppgtt_internal_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_64K,
		SZ_128K,
		SZ_256K,
		SZ_512K,
		SZ_1M,
		SZ_2M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * internal backing store -- ensure that our writes land in the right
	 * place.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_internal(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("internal write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static int igt_ppgtt_gemfs_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_2M,
		SZ_4M,
		SZ_8M,
		SZ_16M,
		SZ_32M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through gemfs --
	 * ensure that our writes land in the right place.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_shmem(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("gemfs write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_ppgtt_pin_update(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *dev_priv = ctx->i915;
	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
	struct i915_address_space *vm = ctx->vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int n;
	int first, last;
	int err;

	/*
	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
	 * past we had a subtle issue with being able to incorrectly do multiple
	 * alloc va ranges on the same object when doing a PIN_UPDATE, which
	 * resulted in some pretty nasty bugs, though only when using
	 * huge-gtt-pages.
	 */

	if (!vm || !i915_vm_is_4lvl(vm)) {
		pr_info("48b PPGTT not supported, skipping\n");
		return 0;
	}

	first = ilog2(I915_GTT_PAGE_SIZE_64K);
	last = ilog2(I915_GTT_PAGE_SIZE_2M);

	for_each_set_bit_from(first, &supported, last + 1) {
		unsigned int page_size = BIT(first);

		obj = i915_gem_object_create_internal(dev_priv, page_size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}

		err = i915_vma_pin(vma, SZ_2M, 0, flags);
		if (err)
			goto out_close;

		if (vma->page_sizes.sg < page_size) {
			pr_info("Unable to allocate page-size %x, finishing test early\n",
				page_size);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);
		if (err)
			goto out_unpin;

		if (vma->page_sizes.gtt != page_size) {
			dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);

			/*
			 * The only valid reason for this to ever fail would be
			 * if the dma-mapper screwed us over when we did the
			 * dma_map_sg(), since it has the final say over the dma
			 * address.
			 */
			if (IS_ALIGNED(addr, page_size)) {
				pr_err("page_sizes.gtt=%u, expected=%u\n",
				       vma->page_sizes.gtt, page_size);
				err = -EINVAL;
			} else {
				pr_info("dma address misaligned, finishing test early\n");
			}

			goto out_unpin;
		}

		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
		if (err)
			goto out_unpin;

		i915_vma_unpin(vma);
		i915_vma_close(vma);

		i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	/*
	 * Make sure we don't end up in a situation where the pde still points
	 * at the stale 2M page while the pt we just filled in is left
	 * dangling -- we can check this by writing to the first page, where
	 * the write would then land in the now stale 2M page.
	 */

	n = 0;
	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_can_store_dword(engine))
			continue;

		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
		if (err)
			goto out_unpin;
	}
	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			goto out_unpin;
	}

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling
	 * back to tmpfs, which we rely on if, on the off-chance, we encounter
	 * a failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;
	for_each_engine(engine, i915, id) {
		if (!intel_engine_can_store_dword(engine))
			continue;

		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
		if (err)
			goto out_unpin;
	}

	i915_vma_unpin(vma);

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_close;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			goto out_unpin;
	}

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_close;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_subtests(tests, ppgtt);

out_close:
	i915_vm_put(&ppgtt->vm);

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	drm_dev_put(&dev_priv->drm);

	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_exhaust_huge),
		SUBTEST(igt_ppgtt_gemfs_huge),
		SUBTEST(igt_ppgtt_internal_huge),
	};
	struct drm_file *file;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	if (ctx->vm)
		ctx->vm->scrub_64K = true;

	err = i915_subtests(tests, ctx);

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);

	return err;
}