/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

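/* Fold the chosen address bit down onto bit 6, mirroring the HW swizzle. */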
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

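/*
 * Convert a linear offset into the object into the offset used by the
 * backing store for the given tiling: resolve the X/Y tile geometry and
 * then apply the platform's bit-6 swizzle pattern.
 */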
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

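/*
 * Pin a single, randomly chosen page of the object through a partial GGTT
 * view, write its page index through the iomap and verify that the value
 * lands at the expected (de-tiled, de-swizzled) location in the backing
 * store.
 */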
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	__i915_vma_put(vma);
	i915_gem_object_unlock(obj);
	return err;
}

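/*
 * Exhaustive variant of the above: walk the prime-numbered pages of the
 * object, checking one partial GGTT view per page until the timeout expires.
 */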
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		__i915_vma_put(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

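/*
 * Fill in the tile geometry for the requested tiling mode on this platform
 * and return the maximum pitch (in tile widths) representable by a fence.
 */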
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many 1s runs with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

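/*
 * Submit a write to the object from every uabi engine so that it is kept
 * busy, alive only via its active references; consumes the caller's
 * reference to the object.
 */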
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

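/*
 * Create an object of the given size and try to assign it a mmap offset,
 * returning true if the outcome matches the expected error code.
 */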
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

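/*
 * Shrink the mmap offset space down to a single page and check that both
 * allocation failure and reclaim of busy-but-dead objects behave as
 * expected.
 */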
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

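/*
 * gtt_set/wc_set prime the backing store with POISON_INUSE; gtt_check and
 * wc_check later verify that the POISON_FREE pattern written through the
 * user mmap reached the backing store, via a GGTT iomap or a WC map
 * respectively.
 */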
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

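/* Report whether the object supports the requested mmap type on this device. */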
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
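/*
 * Map the object with the given mmap type, check that the POISON_INUSE
 * pattern is visible through the user mapping, then overwrite it with
 * POISON_FREE and verify the writes reached the backing store.
 */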
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
	}
}

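/* Only objects backed by struct pages or iomem can be accessed via ptrace. */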
static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}

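/*
 * Exercise access_process_vm() on the user mapping: read back the value
 * written through the mmap and write a new one, checking both directions.
 */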
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;
	u64 offset;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;
	u64 offset;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

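/*
 * PTE walkers for the revoke test: after prefaulting, every PTE in the
 * mapping must be present; after the mmap has been revoked, none may be.
 */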
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

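/*
 * Check that unbinding the object from the GGTT (and, for non-GTT mmaps,
 * dropping its backing pages) revokes the user PTEs.
 */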
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long addr;
	int err;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}