/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

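/*
 * Parameters for one tiling configuration under test: the fence tile
 * geometry (width in bytes, height in rows, log2 tile size), the object
 * stride and the bit-6 swizzle pattern reported for that tiling mode.
 */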
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

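/*
 * Convert a linear offset seen through the fenced GTT view into the
 * corresponding offset in the backing store, replaying the X-major or
 * Y-major tile walk and the reported bit-6 swizzle, so a write through
 * the aperture can be checked from the CPU at the predicted location.
 */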
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

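/*
 * Pick one random page, bind it through a partial GGTT view, write the
 * page index through the fenced aperture and then verify from the CPU
 * that the value landed at the backing-store offset predicted by
 * tiled_offset().
 */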
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	__i915_vma_put(vma);
	i915_gem_object_unlock(obj);
	return err;
}

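/*
 * As check_partial_mapping(), but walk every prime-numbered page of the
 * object for the given tiling, until we run out of pages or time.
 */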
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		__i915_vma_put(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

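/*
 * Fill in the tile geometry for this platform and tiling mode, and return
 * the maximum pitch (in tiles) that the fence registers can describe.
 */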
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

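/*
 * Submit a write to the object from every uabi engine, then drop our
 * reference so the object stays alive only through its active references.
 */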
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

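/*
 * Create an object of the given size and try to assign it a mmap offset,
 * checking that the result matches the expected error code.
 */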
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

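/*
 * Trim the mmap offset space down to a single page, check that allocation
 * fails gracefully once that page is taken, then fill the hole with busy
 * objects that we expect to be reaped once they idle.
 */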
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

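/*
 * Helpers to seed the object with POISON_INUSE and to verify that the
 * POISON_FREE pattern written through a user mmap reached the backing
 * store, using either the GGTT aperture or a WC CPU mapping.
 */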
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

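/*
 * Check whether this object/platform combination supports the requested
 * mmap type, e.g. GTT mmaps need a mappable aperture and objects with
 * their own mmap_offset hook only accept the FIXED type.
 */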
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
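
/*
 * mmap the object with the requested mapping type, check that the
 * POISON_INUSE pattern seeded by wc_set()/gtt_set() reads back correctly,
 * overwrite it with POISON_FREE and verify the write reached the backing
 * store.
 */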
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
	}
}

static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}

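/*
 * Exercise ptrace-style access to the user mmap via access_process_vm(),
 * as used by debuggers, and check it stays coherent with direct user
 * reads and writes.
 */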
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;
	u64 offset;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;
	u64 offset;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

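/*
 * Page-table walkers used to verify that the user mapping's PTEs are
 * populated after faulting and fully cleared again after revocation.
 */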
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

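/* Touch each page of the user mapping so that every PTE is populated. */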
static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

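/*
 * Fault in the user mmap, then unbind the object (and for non-GTT mmaps
 * also drop its pages) and check that all user PTEs have been revoked.
 */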
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long addr;
	int err;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}