// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"
#include "i915_buddy.h"
#include "i915_memcpy.h"
#include "i915_ttm_buddy_manager.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

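/*
 * Tear down a list of test objects: unpin their pages, drop the backing
 * store and the final reference, then drain the freed-object worker so one
 * subtest cannot pollute the memory region seen by the next.
 */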
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

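/*
 * Fill the region with objects sized at prime multiples of the page size.
 * Running out of space is the expected outcome; failing with -ENXIO while
 * the remaining space could still fit the request is flagged as an error.
 */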
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = PAGE_SIZE;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

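/*
 * Create an object in @mem with its pages pinned and track it on @objects,
 * so close_objects() can clean everything up on the exit and error paths;
 * igt_object_release() below is the single-object counterpart.
 */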
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, 0, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

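/*
 * Check whether each sg entry starts exactly where the previous one ended,
 * i.e. the object occupies one contiguous range of DMA addresses.
 */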
static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	dma_addr_t addr = -1;

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (addr != -1 && sg_dma_address(sg) != addr)
			return false;

		addr = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}

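/*
 * Reserve randomly sized ranges at random offsets within each 32M chunk of
 * a fresh mock region, then check that exactly the unreserved space, and no
 * more, can still be allocated.
 */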
static int igt_mock_reserve(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	resource_size_t avail = resource_size(&mem->region);
	struct drm_i915_gem_object *obj;
	const u32 chunk_size = SZ_32M;
	u32 i, offset, count, *order;
	u64 allocated, cur_avail;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	count = avail / chunk_size;
	order = i915_random_order(count, &prng);
	if (!order)
		return 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_free_order;
	}

	/* Reserve a bunch of ranges within the region */
	for (i = 0; i < count; ++i) {
		u64 start = order[i] * chunk_size;
		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);

		/* Allow for some really big holes */
		if (!size)
			continue;

		size = round_up(size, PAGE_SIZE);
		offset = igt_random_offset(&prng, 0, chunk_size, size,
					   PAGE_SIZE);

		err = intel_memory_region_reserve(mem, start + offset, size);
		if (err) {
			pr_err("%s failed to reserve range\n", __func__);
			goto out_close;
		}

		/* XXX: maybe sanity check the block range here? */
		avail -= size;
	}

	/* Try to see if we can allocate from the remaining space */
	allocated = 0;
	cur_avail = avail;
	do {
		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);

		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
		obj = igt_object_create(mem, &objects, size, 0);
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -ENXIO)
				break;

			err = PTR_ERR(obj);
			goto out_close;
		}
		cur_avail -= size;
		allocated += size;
	} while (1);

	if (allocated != avail) {
		pr_err("%s mismatch between allocation and free space\n", __func__);
		err = -EINVAL;
	}

out_close:
	close_objects(mem, &objects);
	intel_memory_region_put(mem);
out_free_order:
	kfree(order);
	return err;
}

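/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: minimum, maximum and randomly sized
 * objects must each span a single DMA range, internal fragmentation must
 * not inflate the object size, and once the region is fragmented into
 * SZ_64K chunks, any contiguous request larger than a chunk must fail even
 * though half of the region is still free.
 */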
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, PAGE_SIZE,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s min object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s max object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (!is_contiguous(obj)) {
		pr_err("%s object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= PAGE_SIZE);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

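/*
 * Create a region whose size is deliberately not a power-of-two and check
 * that the buddy allocator still exposes the whole size, that max_order
 * matches the largest power-of-two fitting the region, and that contiguous
 * allocations are capped accordingly.
 */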
static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct i915_buddy_mm *mm;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check that we can still allocate everything even if
	 * mm.max_order does not cover mm.size, i.e. our starting address
	 * space size is not a power-of-two.
	 */

	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	mm = res->mm;
	if (mm->size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mm->size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mm->max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mm->max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	close_objects(mem, &objects);

	/*
	 * While we should be able to allocate everything without any flag
	 * restrictions, with I915_BO_ALLOC_CONTIGUOUS we are actually limited
	 * to the largest power-of-two of the region size, i.e. max_order, due
	 * to the inner workings of the buddy allocator. So make sure that does
	 * indeed hold true.
	 */

	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

static int igt_mock_max_segment(void *arg)
{
	const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct i915_buddy_block *block;
	struct i915_buddy_mm *mm;
	struct list_head *blocks;
	struct scatterlist *sg;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * While we may create very large contiguous blocks, we may need
	 * to break those down for consumption elsewhere. In particular,
	 * dma-mapping with scatterlist elements has an implicit limit of
	 * UINT_MAX on each element.
	 */

	size = SZ_8G;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	blocks = &res->blocks;
	mm = res->mm;
	size = 0;
	list_for_each_entry(block, blocks, link) {
		if (i915_buddy_block_size(mm, block) > size)
			size = i915_buddy_block_size(mm, block);
	}
	if (size < max_segment) {
		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
		       __func__, max_segment, size);
		err = -EINVAL;
		goto out_close;
	}

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (sg->length > max_segment) {
			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
			       __func__, sg->length, max_segment);
			err = -EINVAL;
			goto out_close;
		}
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}

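/* Write @value into one dword of each page spanned by @vma. */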
static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

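/*
 * Read back the given dword of each page through a WC mapping and compare
 * it with the expected value, waiting first for any pending GPU writes.
 */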
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

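/*
 * Scatter dword writes across every engine in @ctx that can store a dword,
 * verifying each write from the CPU afterwards, until the selftest timeout
 * expires.
 */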
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		i915_gem_object_lock(obj, NULL);
		err = igt_cpu_check(obj, dword, rng);
		i915_gem_object_unlock(obj);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

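/*
 * For each power-of-two page size from PAGE_SIZE to 1G, create an lmem
 * object with that minimum page size and check that its first DMA address
 * is aligned accordingly. Regions too small for a given page size simply
 * terminate the loop without failing.
 */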
static int igt_lmem_create_with_ps(void *arg)
{
	struct drm_i915_private *i915 = arg;
	int err = 0;
	u32 ps;

	for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
		struct drm_i915_gem_object *obj;
		dma_addr_t daddr;

		obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -ENXIO || err == -E2BIG) {
				pr_info("%s not enough lmem for ps(%u) err=%d\n",
					__func__, ps, err);
				err = 0;
			}

			break;
		}

		if (obj->base.size != ps) {
			pr_err("%s size(%zu) != ps(%u)\n",
			       __func__, obj->base.size, ps);
			err = -EINVAL;
			goto out_put;
		}

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_unlock;

		daddr = i915_gem_object_get_dma_address(obj, 0);
		if (!IS_ALIGNED(daddr, ps)) {
			pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
			       __func__, &daddr, ps);
			err = -EINVAL;
			goto out_unpin;
		}

out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_unlock:
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err)
			break;
	}

	return err;
}

static int igt_lmem_create_cleared_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 size, i;
	int err;

	i915_gem_drain_freed_objects(i915);

	size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
	size = round_up(size, PAGE_SIZE);
	i = 0;

	do {
		struct drm_i915_gem_object *obj;
		unsigned int flags;
		u32 dword, val;
		void *vaddr;

		/*
		 * Alternate between cleared and uncleared allocations, while
		 * also dirtying the pages each time to check that the pages are
		 * always cleared if requested, since we should get some overlap
		 * of the underlying pages, if not all, since we are the only
		 * user.
		 */

		flags = I915_BO_ALLOC_CPU_CLEAR;
		if (i & 1)
			flags = 0;

		obj = i915_gem_object_create_lmem(i915, size, flags);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
						   &prng);

		if (flags & I915_BO_ALLOC_CPU_CLEAR) {
			err = igt_cpu_check(obj, dword, 0);
			if (err) {
				pr_err("%s failed with size=%u, flags=%u\n",
				       __func__, size, flags);
				goto out_unpin;
			}
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto out_unpin;
		}

		val = prandom_u32_state(&prng);

		memset32(vaddr, val, obj->base.size / sizeof(u32));

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);

		if (err)
			break;
		++i;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s completed (%u) iterations\n", __func__, i);

	return err;
}

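/*
 * Smoke test GPU writes to lmem: create a randomly sized (up to 32M) lmem
 * object and hammer it via igt_gpu_write() from a live context.
 */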
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

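/*
 * Pick a uniformly random engine of @class from the uabi engine list, by
 * first counting the engines of that class and then indexing into it.
 */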
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

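/*
 * Fill a contiguous lmem object from the GPU (via the migrate context on a
 * copy engine), then overwrite random ranges through a WC mapping with
 * varying size and alignment, sampling one dword per write to verify the
 * CPU access path.
 */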
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	struct i915_request *rq;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	i915_gem_object_lock(obj, NULL);
	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
					  obj->mm.pages->sgl, I915_CACHE_NONE,
					  true, 0xdeadbeaf, &rq);
	if (rq) {
		dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
		i915_request_put(rq);
	}

	intel_engine_pm_put(engine);
	if (!err)
		err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	kfree(order);
out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}

static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}

static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}

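/*
 * Time several copy strategies between @src_mr and @dst_mr mappings over
 * five passes each, then report a throughput estimate weighted towards the
 * median passes. Unstable measurements are skipped rather than reported.
 */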
static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();

			tests[i].copy(dst_addr, src_addr, size);

			t1 = ktime_get();
			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		if (t[0] <= 0) {
			/* ignore the impossible to protect our sanity */
			pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
				 __func__,
				 src_mr->name, repr_type(src_type),
				 dst_mr->name, repr_type(dst_type),
				 tests[i].name, size >> 10,
				 t[0], t[4]);
			continue;
		}

		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name, repr_type(src_type),
			dst_mr->name, repr_type(dst_type),
			tests[i].name, size >> 10,
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}

int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_reserve),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
		SUBTEST(igt_mock_max_segment),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_create_with_ps),
		SUBTEST(igt_lmem_create_cleared_cpu),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}