/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

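/*
 * Convert a linear byte offset into the object into the offset at which that
 * byte lands in the backing store once the fence tiling (X or Y major) and
 * bit-6 swizzling are applied. This mirrors the hardware address translation
 * in software so that a value written through a fenced GGTT mmap can be
 * located again by the CPU for verification.
 */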
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

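/*
 * Write a known value through a randomly chosen partial GGTT view of the
 * object, then read it back with the CPU from the backing page predicted by
 * tiled_offset(), checking that the fenced write landed where we expect.
 */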
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

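/*
 * Exhaustive variant of check_partial_mapping(): walk a prime-spaced set of
 * pages, binding a fresh partial GGTT view for each, and verify that every
 * write lands in the expected backing page until we run out of pages or time.
 */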
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

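/*
 * Fill in the per-generation tile geometry (width, height and log2 tile size)
 * for the chosen tiling mode, and return the maximum pitch, in units of tile
 * widths, that the fence registers can describe.
 */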
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (INTEL_GEN(i915) < 4)
		return 8192 / tile->width;
	else if (INTEL_GEN(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, with the
	 * expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many 1s runs with different
	 * seeds we build up a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (INTEL_GEN(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

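/*
 * Submit a trivial request against the object on every uabi engine so that it
 * is kept busy, then drop the caller's reference: the object stays alive only
 * via its active reference until the requests retire.
 */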
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

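/*
 * Try to attach a GTT mmap offset to a freshly created object of the given
 * size and report whether the attempt succeeded or failed as expected.
 */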
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false;

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	i915_gem_object_put(obj);

	return PTR_ERR_OR_ZERO(mmo) == expected;
}

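/*
 * Unregister the shrinker and stop the background retire worker (keeping the
 * GT awake) so that the test controls exactly when objects are retired and
 * reaped; restore_retire_worker() undoes this once the test is finished.
 */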
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

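/*
 * Shrink the mmap offset space down to a single page and check that object
 * creation, oversized objects and the reclaim of dead-but-busy objects all
 * behave sensibly once that space is exhausted.
 */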
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	struct i915_mmap_offset *mmo;
	int loop, err = 0;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	if (IS_ERR(mmo)) {
		pr_err("Unable to insert object into reclaimed hole\n");
		err = PTR_ERR(mmo);
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

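/*
 * gtt_set()/gtt_check() poison (POISON_INUSE) and verify (POISON_FREE) the
 * object contents through a mappable GGTT binding; the mmap tests fall back
 * to these when a WC CPU map of the backing store is not available.
 */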
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

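/*
 * wc_set()/wc_check() poison and verify the object contents through a
 * write-combined CPU map of the backing store.
 */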
static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	if (type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
		return false;

	return true;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
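/*
 * Map the object into the user address space with the requested mmap type,
 * check that every dword reads back the POISON_INUSE pattern previously
 * written into the backing store, then rewrite it with POISON_FREE and verify
 * the update is visible through the backing store again.
 */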
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = find_vma(current->mm, addr);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	if (area->vm_private_data != mmo) {
		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = i915_gem_object_create_region(mr, sizes[i], 0);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	default: return "unknown";
	}
}

static bool can_access(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_has_struct_page(obj) ||
	       i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}

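/*
 * Exercise ptrace-style access (access_process_vm) to the various mmap types,
 * checking that out-of-line reads and writes stay coherent with ordinary
 * loads and stores through the user mapping.
 */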
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

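/*
 * Helpers for __igt_mmap_revoke(): walk the user PTEs of the mmap and check
 * that they are all populated after faulting, and all cleared again once the
 * mapping has been revoked.
 */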
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

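/*
 * Prefault the mmap and check the user PTEs are present, then revoke the
 * mapping by unbinding the object (and, for non-GTT mmaps, discarding its
 * pages) and check that the user PTEs have been removed.
 */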
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	int err;

	if (!can_mmap(obj, type))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}