/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

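/*
 * One tiling/swizzling configuration under test: the tile geometry
 * (width/height of a tile row, log2 of the tile size), the object stride,
 * the tiling mode and the bit-6 swizzle pattern reported by the GGTT.
 */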
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

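/* Extract the selected address bit and shift it down to bit 6, the bit that gets swizzled. */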
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

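/*
 * Convert a linear offset, as seen through the fenced GTT mapping, into the
 * backing-store offset we expect the write to land at, by manually applying
 * the X/Y tile layout and then XORing in the platform's bit-6 swizzle.
 */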
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

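/*
 * Pick a single random page, pin a partial GGTT view around it, write the
 * page index through the fenced iomap and then verify, via a CPU kmap of
 * the backing store, that the write landed at the offset predicted by
 * tiled_offset().
 */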
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

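/*
 * Exhaustive variant of check_partial_mapping(): walk every prime-numbered
 * page of the object, write through a partial GGTT view of each and verify
 * the result in the backing store, until the timeout expires.
 */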
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

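/*
 * Fill in the tile geometry (height, width, log2 size) for the requested
 * tiling mode on this platform, and return the maximum fence pitch in
 * units of tile widths.
 */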
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (INTEL_GEN(i915) < 4)
		return 8192 / tile->width;
	else if (INTEL_GEN(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, and with the
	 * expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many 1s runs with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (INTEL_GEN(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

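/*
 * Mark the object as being written by an empty kernel request on every uabi
 * engine, then drop our reference so the object stays alive only through its
 * active reference and must be reaped once those requests retire.
 */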
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			i915_vma_unpin(vma);
			return PTR_ERR(rq);
		}

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(vma);

		i915_request_add(rq);
		i915_vma_unpin(vma);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

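/*
 * Probe whether attaching a GTT mmap offset to an object of the given size
 * fails (or succeeds) with the expected errno.
 */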
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false; /* creating the probe object itself failed */

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	i915_gem_object_put(obj);

	return PTR_ERR_OR_ZERO(mmo) == expected;
}

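/*
 * Hold the GT awake and stop the background retire worker and shrinker so
 * that stale objects are only reaped when the test explicitly flushes them.
 */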
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

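/*
 * Shrink the mmap offset space down to a single page and verify that exactly
 * one object fits, that an oversized object is rejected with -ENOSPC, and
 * that once the hole is occupied further attachments also fail. Finish by
 * creating busy, unreferenced objects that must be reaped afterwards.
 */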
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	struct i915_mmap_offset *mmo;
	int loop, err = 0;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	if (IS_ERR(mmo)) {
		pr_err("Unable to insert object into reclaimed hole\n");
		err = PTR_ERR(mmo);
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

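/*
 * Helpers to fill the object with POISON_INUSE and to verify POISON_FREE,
 * either through a GTT iomap (gtt_set/gtt_check) or a WC CPU map
 * (wc_set/wc_check), so the mmap tests can confirm that userspace writes
 * reach the backing store.
 */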
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

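/*
 * A GTT mmap needs a mappable aperture, while the other mmap types need the
 * object to be backed by struct pages or iomem; skip combinations this
 * object cannot support.
 */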
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	if (type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM))
		return false;

	return true;
}

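/*
 * Fill the object with POISON_INUSE, mmap it into userspace, check that
 * every u32 reads back as that poison, overwrite it with POISON_FREE from
 * userspace and finally verify the new pattern in the backing store.
 */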
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = find_vma(current->mm, addr);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	if (area->vm_private_data != mmo) {
		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = i915_gem_object_create_region(mr, sizes[i], 0);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			goto out_unmap;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);
		i915_vma_unlock(vma);

		if (err == 0)
			err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

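/*
 * Page-table walkers used with apply_to_page_range() to assert that the PTEs
 * backing an mmap are all present after faulting, and all cleared again once
 * the mapping has been revoked.
 */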
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

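/*
 * Touch one byte in every page of the userspace mapping so that all of its
 * PTEs are populated before we start checking them.
 */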
static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

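/*
 * Fault in a userspace mapping of the object, then unbind the object (and,
 * for non-GTT mmaps, drop its pages) and verify that every PTE of the
 * mapping has been revoked so stale accesses cannot reach reused memory.
 */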
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	int err;

	if (!can_mmap(obj, type))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
		   !atomic_read(&obj->bind_count));

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}
	GEM_BUG_ON(atomic_read(&obj->bind_count));

	if (type != I915_MMAP_TYPE_GTT) {
		__i915_gem_object_put_pages(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}