/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_migrate.h"
#include "i915_reg.h"
#include "i915_ttm_buddy_manager.h"

#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

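/*
 * Compute where a write at offset @v through a fenced GTT mapping is
 * expected to land in the object's backing store, applying the detiling
 * and bit-6 swizzling described by @tile.
 */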
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

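/*
 * Pick one random page, write its index through a partial GGTT mapping
 * of the object, then verify from the CPU that the value landed at the
 * backing-store offset predicted by tiled_offset().
 */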
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_gtt_view view;
	struct i915_vma *vma;
	unsigned long offset;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	i915_vma_destroy(vma);
	i915_gem_object_unlock(obj);
	return err;
}

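/*
 * Exhaustive variant of check_partial_mapping(): walk prime-numbered
 * pages of the object, writing each page index through its partial GGTT
 * mapping and checking the swizzled landing spot, until we run out of
 * pages or time.
 */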
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_gtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		unsigned long offset;
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		i915_vma_destroy(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

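/* Exhaustively check partial mappings across all tilings and pitches. */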
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, and with the
	 * expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many runs of 1s with
	 * different seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

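/*
 * Submit a write to the object from every uabi engine, leaving it busy
 * so that it is only retired (and freed) asynchronously once we drop
 * our reference at the end.
 */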
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_vma_move_to_active(vma, rq,
					      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

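/*
 * Create an object of @size and try to reserve a mmap offset for it,
 * returning whether the outcome matches @expected.
 */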
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

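/*
 * Trim the mmap offset space down to a single page, check that further
 * allocations fail cleanly once it is full, and that busy but dead
 * objects are reaped to make room for new ones.
 */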
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

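/*
 * gtt_set()/gtt_check(): fill the object with POISON_INUSE through a
 * GGTT iomap, and later verify that the user's POISON_FREE writes via
 * the mmap reached the backing store.
 */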
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

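/*
 * wc_set()/wc_check(): same poison fill and verify as gtt_set() and
 * gtt_check(), but through a WC CPU mapping of the object's pages.
 */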
static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

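/*
 * Filter out mmap types this object/platform combination cannot
 * support, e.g. GTT mmaps without a mappable aperture, or CPU mmaps of
 * objects backed by neither struct pages nor iomem.
 */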
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
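/*
 * Map the object into the process address space with the requested mmap
 * type, check the POISON_INUSE pattern reads back correctly through the
 * user mapping, overwrite it with POISON_FREE and verify the writes are
 * visible in the backing store.
 */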
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		if (mr->private)
			continue;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void igt_close_objects(struct drm_i915_private *i915,
			      struct list_head *objects)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

static void igt_make_evictable(struct list_head *objects)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		i915_gem_object_unlock(obj);
	}

	cond_resched();
}

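/*
 * Consume as much of the mappable (CPU visible) portion of @mr as
 * possible with pinned objects, halving the allocation size on failure
 * until even the minimum page size no longer fits.
 */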
static int igt_fill_mappable(struct intel_memory_region *mr,
			     struct list_head *objects)
{
	u64 size, total;
	int err;

	total = 0;
	size = mr->io_size;
	do {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close;
		}

		list_add(&obj->st_link, objects);

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err != -ENXIO && err != -ENOMEM)
				goto err_close;

			if (size == mr->min_page_size) {
				err = 0;
				break;
			}

			size >>= 1;
			continue;
		}

		total += obj->base.size;
	} while (1);

	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
	return 0;

err_close:
	igt_close_objects(mr->i915, objects);
	return err;
}

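/*
 * Access the object through an existing userspace mapping at @addr,
 * reading back POISON_INUSE and writing POISON_FREE as in __igt_mmap();
 * if @unfaultable is set, expect every access to fault instead.
 */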
___igt_mmap_migrate(struct drm_i915_private * i915,struct drm_i915_gem_object * obj,unsigned long addr,bool unfaultable)1092fb87550dSMatthew Auld static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1093fb87550dSMatthew Auld 			       struct drm_i915_gem_object *obj,
1094fb87550dSMatthew Auld 			       unsigned long addr,
1095fb87550dSMatthew Auld 			       bool unfaultable)
1096fb87550dSMatthew Auld {
1097fb87550dSMatthew Auld 	struct vm_area_struct *area;
1098fb87550dSMatthew Auld 	int err = 0, i;
1099fb87550dSMatthew Auld 
1100fb87550dSMatthew Auld 	pr_info("igt_mmap(%s, %d) @ %lx\n",
1101fb87550dSMatthew Auld 		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1102fb87550dSMatthew Auld 
1103fb87550dSMatthew Auld 	mmap_read_lock(current->mm);
1104fb87550dSMatthew Auld 	area = vma_lookup(current->mm, addr);
1105fb87550dSMatthew Auld 	mmap_read_unlock(current->mm);
1106fb87550dSMatthew Auld 	if (!area) {
1107fb87550dSMatthew Auld 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1108fb87550dSMatthew Auld 		       obj->mm.region->name);
1109fb87550dSMatthew Auld 		err = -EINVAL;
1110fb87550dSMatthew Auld 		goto out_unmap;
1111fb87550dSMatthew Auld 	}
1112fb87550dSMatthew Auld 
1113fb87550dSMatthew Auld 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1114fb87550dSMatthew Auld 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1115fb87550dSMatthew Auld 		u32 x;
1116fb87550dSMatthew Auld 
1117fb87550dSMatthew Auld 		if (get_user(x, ux)) {
1118fb87550dSMatthew Auld 			err = -EFAULT;
1119fb87550dSMatthew Auld 			if (!unfaultable) {
1120fb87550dSMatthew Auld 				pr_err("%s: Unable to read from mmap, offset:%zd\n",
1121fb87550dSMatthew Auld 				       obj->mm.region->name, i * sizeof(x));
1122fb87550dSMatthew Auld 				goto out_unmap;
1123fb87550dSMatthew Auld 			}
1124fb87550dSMatthew Auld 
1125fb87550dSMatthew Auld 			continue;
1126fb87550dSMatthew Auld 		}
1127fb87550dSMatthew Auld 
1128fb87550dSMatthew Auld 		if (unfaultable) {
1129fb87550dSMatthew Auld 			pr_err("%s: Faulted unmappable memory\n",
1130fb87550dSMatthew Auld 			       obj->mm.region->name);
1131fb87550dSMatthew Auld 			err = -EINVAL;
1132fb87550dSMatthew Auld 			goto out_unmap;
1133fb87550dSMatthew Auld 		}
1134fb87550dSMatthew Auld 
1135fb87550dSMatthew Auld 		if (x != expand32(POISON_INUSE)) {
1136fb87550dSMatthew Auld 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1137fb87550dSMatthew Auld 			       obj->mm.region->name,
1138fb87550dSMatthew Auld 			       i * sizeof(x), x, expand32(POISON_INUSE));
1139fb87550dSMatthew Auld 			err = -EINVAL;
1140fb87550dSMatthew Auld 			goto out_unmap;
1141fb87550dSMatthew Auld 		}
1142fb87550dSMatthew Auld 
1143fb87550dSMatthew Auld 		x = expand32(POISON_FREE);
1144fb87550dSMatthew Auld 		if (put_user(x, ux)) {
1145fb87550dSMatthew Auld 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
1146fb87550dSMatthew Auld 			       obj->mm.region->name, i * sizeof(x));
1147fb87550dSMatthew Auld 			err = -EFAULT;
1148fb87550dSMatthew Auld 			goto out_unmap;
1149fb87550dSMatthew Auld 		}
1150fb87550dSMatthew Auld 	}
1151fb87550dSMatthew Auld 
1152fb87550dSMatthew Auld 	if (unfaultable) {
1153fb87550dSMatthew Auld 		if (err == -EFAULT)
1154fb87550dSMatthew Auld 			err = 0;
1155fb87550dSMatthew Auld 	} else {
1156fb87550dSMatthew Auld 		obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1157fb87550dSMatthew Auld 		err = wc_check(obj);
1158fb87550dSMatthew Auld 	}
1159fb87550dSMatthew Auld out_unmap:
1160fb87550dSMatthew Auld 	vm_munmap(addr, obj->base.size);
1161fb87550dSMatthew Auld 	return err;
1162fb87550dSMatthew Auld }
1163fb87550dSMatthew Auld 
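/*
 * Flags controlling the placement/fault behaviour exercised by
 * __igt_mmap_migrate():
 *   TOPDOWN     - allocate GPU-only, i.e. in the non-mappable portion
 *   FILL        - fill the mappable portion up front with pinned objects
 *   EVICTABLE   - unpin the filler objects so they can be evicted on fault
 *   UNFAULTABLE - expect the CPU fault to fail (SIGBUS/-EFAULT)
 *   FAIL_GPU    - inject a GPU copy failure while migrating on fault
 */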
1164fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
1165fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_FILL        (1 << 1)
1166fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
1167fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1168bfe53be2SMatthew Auld #define IGT_MMAP_MIGRATE_FAIL_GPU    (1 << 4)
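/*
 * Create a one page object with the given placements, clear it to
 * POISON_INUSE with the GPU migrate context, and then fault it through a
 * FIXED CPU mmap, checking that the pages end up in @expected_mr. A sketch
 * of the small-BAR failure case exercised below in igt_mmap_migrate():
 *
 *	err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
 *				 IGT_MMAP_MIGRATE_TOPDOWN |
 *				 IGT_MMAP_MIGRATE_FILL |
 *				 IGT_MMAP_MIGRATE_UNFAULTABLE);
 *
 * i.e. the object lives in the non-mappable portion while the mappable
 * portion is full of pinned objects, so the CPU fault is expected to fail.
 */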
__igt_mmap_migrate(struct intel_memory_region ** placements,int n_placements,struct intel_memory_region * expected_mr,unsigned int flags)1169fb87550dSMatthew Auld static int __igt_mmap_migrate(struct intel_memory_region **placements,
1170fb87550dSMatthew Auld 			      int n_placements,
1171fb87550dSMatthew Auld 			      struct intel_memory_region *expected_mr,
1172fb87550dSMatthew Auld 			      unsigned int flags)
1173fb87550dSMatthew Auld {
1174fb87550dSMatthew Auld 	struct drm_i915_private *i915 = placements[0]->i915;
1175fb87550dSMatthew Auld 	struct drm_i915_gem_object *obj;
1176fb87550dSMatthew Auld 	struct i915_request *rq = NULL;
1177fb87550dSMatthew Auld 	unsigned long addr;
1178fb87550dSMatthew Auld 	LIST_HEAD(objects);
1179fb87550dSMatthew Auld 	u64 offset;
1180fb87550dSMatthew Auld 	int err;
1181fb87550dSMatthew Auld 
1182fb87550dSMatthew Auld 	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1183fb87550dSMatthew Auld 					    placements,
1184fb87550dSMatthew Auld 					    n_placements);
1185fb87550dSMatthew Auld 	if (IS_ERR(obj))
1186fb87550dSMatthew Auld 		return PTR_ERR(obj);
1187fb87550dSMatthew Auld 
1188fb87550dSMatthew Auld 	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1189fb87550dSMatthew Auld 		obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1190fb87550dSMatthew Auld 
1191fb87550dSMatthew Auld 	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1192fb87550dSMatthew Auld 	if (err)
1193fb87550dSMatthew Auld 		goto out_put;
1194fb87550dSMatthew Auld 
1195fb87550dSMatthew Auld 	/*
1196fb87550dSMatthew Auld 	 * This will eventually create a GEM context, due to opening a dummy
1197fb87550dSMatthew Auld 	 * drm file, which needs a tiny amount of mappable device memory for
1198fb87550dSMatthew Auld 	 * the top-level paging structures (and perhaps scratch), so make sure
1199fb87550dSMatthew Auld 	 * we allocate early, to avoid tears.
1200fb87550dSMatthew Auld 	 */
1201fb87550dSMatthew Auld 	addr = igt_mmap_offset(i915, offset, obj->base.size,
1202fb87550dSMatthew Auld 			       PROT_WRITE, MAP_SHARED);
1203fb87550dSMatthew Auld 	if (IS_ERR_VALUE(addr)) {
1204fb87550dSMatthew Auld 		err = addr;
1205fb87550dSMatthew Auld 		goto out_put;
1206fb87550dSMatthew Auld 	}
1207fb87550dSMatthew Auld 
1208fb87550dSMatthew Auld 	if (flags & IGT_MMAP_MIGRATE_FILL) {
1209fb87550dSMatthew Auld 		err = igt_fill_mappable(placements[0], &objects);
1210fb87550dSMatthew Auld 		if (err)
1211fb87550dSMatthew Auld 			goto out_put;
1212fb87550dSMatthew Auld 	}
1213fb87550dSMatthew Auld 
1214fb87550dSMatthew Auld 	err = i915_gem_object_lock(obj, NULL);
1215fb87550dSMatthew Auld 	if (err)
1216fb87550dSMatthew Auld 		goto out_put;
1217fb87550dSMatthew Auld 
1218fb87550dSMatthew Auld 	err = i915_gem_object_pin_pages(obj);
1219fb87550dSMatthew Auld 	if (err) {
1220fb87550dSMatthew Auld 		i915_gem_object_unlock(obj);
1221fb87550dSMatthew Auld 		goto out_put;
1222fb87550dSMatthew Auld 	}
1223fb87550dSMatthew Auld 
1224fb87550dSMatthew Auld 	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1225fb87550dSMatthew Auld 					  obj->mm.pages->sgl, obj->pat_index,
1226fb87550dSMatthew Auld 					  i915_gem_object_is_lmem(obj),
1227fb87550dSMatthew Auld 					  expand32(POISON_INUSE), &rq);
1228fb87550dSMatthew Auld 	i915_gem_object_unpin_pages(obj);
1229fb87550dSMatthew Auld 	if (rq) {
123011f01dcfSMatthew Auld 		err = dma_resv_reserve_fences(obj->base.resv, 1);
123111f01dcfSMatthew Auld 		if (!err)
123273511edfSChristian König 			dma_resv_add_fence(obj->base.resv, &rq->fence,
12331d7f5e6cSChristian König 					   DMA_RESV_USAGE_KERNEL);
1234fb87550dSMatthew Auld 		i915_request_put(rq);
1235fb87550dSMatthew Auld 	}
1236fb87550dSMatthew Auld 	i915_gem_object_unlock(obj);
1237fb87550dSMatthew Auld 	if (err)
1238fb87550dSMatthew Auld 		goto out_put;
1239fb87550dSMatthew Auld 
1240fb87550dSMatthew Auld 	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1241fb87550dSMatthew Auld 		igt_make_evictable(&objects);
1242fb87550dSMatthew Auld 
1243bfe53be2SMatthew Auld 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1244bfe53be2SMatthew Auld 		err = i915_gem_object_lock(obj, NULL);
1245bfe53be2SMatthew Auld 		if (err)
1246bfe53be2SMatthew Auld 			goto out_put;
1247bfe53be2SMatthew Auld 
1248bfe53be2SMatthew Auld 		/*
1249bfe53be2SMatthew Auld 		 * Ensure we only simulate the gpu failure when faulting the
1250bfe53be2SMatthew Auld 		 * pages.
1251bfe53be2SMatthew Auld 		 */
1252bfe53be2SMatthew Auld 		err = i915_gem_object_wait_moving_fence(obj, true);
1253bfe53be2SMatthew Auld 		i915_gem_object_unlock(obj);
1254bfe53be2SMatthew Auld 		if (err)
1255bfe53be2SMatthew Auld 			goto out_put;
1256bfe53be2SMatthew Auld 		i915_ttm_migrate_set_failure_modes(true, false);
1257bfe53be2SMatthew Auld 	}
1258bfe53be2SMatthew Auld 
1259fb87550dSMatthew Auld 	err = ___igt_mmap_migrate(i915, obj, addr,
1260fb87550dSMatthew Auld 				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1261bfe53be2SMatthew Auld 
1262fb87550dSMatthew Auld 	if (!err && obj->mm.region != expected_mr) {
1263fb87550dSMatthew Auld 		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1264fb87550dSMatthew Auld 		err = -EINVAL;
1265fb87550dSMatthew Auld 	}
1266fb87550dSMatthew Auld 
1267bfe53be2SMatthew Auld 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1268bfe53be2SMatthew Auld 		struct intel_gt *gt;
1269bfe53be2SMatthew Auld 		unsigned int id;
1270bfe53be2SMatthew Auld 
1271bfe53be2SMatthew Auld 		i915_ttm_migrate_set_failure_modes(false, false);
1272bfe53be2SMatthew Auld 
1273bfe53be2SMatthew Auld 		for_each_gt(gt, i915, id) {
1274bfe53be2SMatthew Auld 			intel_wakeref_t wakeref;
1275bfe53be2SMatthew Auld 			bool wedged;
1276bfe53be2SMatthew Auld 
1277bfe53be2SMatthew Auld 			mutex_lock(&gt->reset.mutex);
1278bfe53be2SMatthew Auld 			wedged = test_bit(I915_WEDGED, &gt->reset.flags);
1279bfe53be2SMatthew Auld 			mutex_unlock(&gt->reset.mutex);
1280bfe53be2SMatthew Auld 			if (!wedged) {
1281bfe53be2SMatthew Auld 				pr_err("gt(%u) not wedged\n", id);
1282bfe53be2SMatthew Auld 				err = -EINVAL;
1283bfe53be2SMatthew Auld 				continue;
1284bfe53be2SMatthew Auld 			}
1285bfe53be2SMatthew Auld 
1286bfe53be2SMatthew Auld 			wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1287bfe53be2SMatthew Auld 			igt_global_reset_lock(gt);
1288bfe53be2SMatthew Auld 			intel_gt_reset(gt, ALL_ENGINES, NULL);
1289bfe53be2SMatthew Auld 			igt_global_reset_unlock(gt);
1290bfe53be2SMatthew Auld 			intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1291bfe53be2SMatthew Auld 		}
1292bfe53be2SMatthew Auld 
1293bfe53be2SMatthew Auld 		if (!i915_gem_object_has_unknown_state(obj)) {
1294bfe53be2SMatthew Auld 			pr_err("object missing unknown_state\n");
1295bfe53be2SMatthew Auld 			err = -EINVAL;
1296bfe53be2SMatthew Auld 		}
1297bfe53be2SMatthew Auld 	}
1298bfe53be2SMatthew Auld 
1299fb87550dSMatthew Auld out_put:
1300fb87550dSMatthew Auld 	i915_gem_object_put(obj);
1301fb87550dSMatthew Auld 	igt_close_objects(i915, &objects);
1302fb87550dSMatthew Auld 	return err;
1303fb87550dSMatthew Auld }
1304fb87550dSMatthew Auld 
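/*
 * For each memory region with a CPU-visible window, force a small-BAR
 * layout if one is not already present and run the __igt_mmap_migrate()
 * cases against it, restoring the original io_size afterwards.
 */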
igt_mmap_migrate(void * arg)1305fb87550dSMatthew Auld static int igt_mmap_migrate(void *arg)
1306fb87550dSMatthew Auld {
1307fb87550dSMatthew Auld 	struct drm_i915_private *i915 = arg;
1308fb87550dSMatthew Auld 	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1309fb87550dSMatthew Auld 	struct intel_memory_region *mr;
1310fb87550dSMatthew Auld 	enum intel_region_id id;
1311fb87550dSMatthew Auld 
1312fb87550dSMatthew Auld 	for_each_memory_region(mr, i915, id) {
1313fb87550dSMatthew Auld 		struct intel_memory_region *mixed[] = { mr, system };
1314fb87550dSMatthew Auld 		struct intel_memory_region *single[] = { mr };
1315fb87550dSMatthew Auld 		struct ttm_resource_manager *man = mr->region_private;
1316fb87550dSMatthew Auld 		resource_size_t saved_io_size;
1317fb87550dSMatthew Auld 		int err;
1318fb87550dSMatthew Auld 
1319fb87550dSMatthew Auld 		if (mr->private)
1320fb87550dSMatthew Auld 			continue;
1321fb87550dSMatthew Auld 
1322fb87550dSMatthew Auld 		if (!mr->io_size)
1323fb87550dSMatthew Auld 			continue;
1324fb87550dSMatthew Auld 
1325fb87550dSMatthew Auld 		/*
1326fb87550dSMatthew Auld 		 * For testing purposes let's force a small BAR, if not already
1327fb87550dSMatthew Auld 		 * present.
1328fb87550dSMatthew Auld 		 */
1329fb87550dSMatthew Auld 		saved_io_size = mr->io_size;
1330fb87550dSMatthew Auld 		if (mr->io_size == mr->total) {
1331fb87550dSMatthew Auld 			resource_size_t io_size = mr->io_size;
1332fb87550dSMatthew Auld 
1333fb87550dSMatthew Auld 			io_size = rounddown_pow_of_two(io_size >> 1);
1334fb87550dSMatthew Auld 			if (io_size < PAGE_SIZE)
1335fb87550dSMatthew Auld 				continue;
1336fb87550dSMatthew Auld 
1337fb87550dSMatthew Auld 			mr->io_size = io_size;
1338fb87550dSMatthew Auld 			i915_ttm_buddy_man_force_visible_size(man,
1339fb87550dSMatthew Auld 							      io_size >> PAGE_SHIFT);
1340fb87550dSMatthew Auld 		}
1341fb87550dSMatthew Auld 
1342fb87550dSMatthew Auld 		/*
1343fb87550dSMatthew Auld 		 * Allocate in the mappable portion; there should be no surprises here.
1344fb87550dSMatthew Auld 		 */
1345fb87550dSMatthew Auld 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1346fb87550dSMatthew Auld 		if (err)
1347fb87550dSMatthew Auld 			goto out_io_size;
1348fb87550dSMatthew Auld 
1349fb87550dSMatthew Auld 		/*
1350fb87550dSMatthew Auld 		 * Allocate in the non-mappable portion, but force migrating to
1351fb87550dSMatthew Auld 		 * the mappable portion on fault (LMEM -> LMEM)
1352fb87550dSMatthew Auld 		 */
1353fb87550dSMatthew Auld 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1354fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_TOPDOWN |
1355fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_FILL |
1356fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_EVICTABLE);
1357fb87550dSMatthew Auld 		if (err)
1358fb87550dSMatthew Auld 			goto out_io_size;
1359fb87550dSMatthew Auld 
1360fb87550dSMatthew Auld 		/*
1361fb87550dSMatthew Auld 		 * Allocate in the non-mappable portion, but force spilling into
1362fb87550dSMatthew Auld 		 * system memory on fault (LMEM -> SMEM)
1363fb87550dSMatthew Auld 		 */
1364fb87550dSMatthew Auld 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1365fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_TOPDOWN |
1366fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_FILL);
1367fb87550dSMatthew Auld 		if (err)
1368fb87550dSMatthew Auld 			goto out_io_size;
1369fb87550dSMatthew Auld 
1370fb87550dSMatthew Auld 		/*
1371fb87550dSMatthew Auld 		 * Allocate in the non-mappable portion, but since the mappable
1372fb87550dSMatthew Auld 		 * portion is already full, and we can't spill to system memory,
1373fb87550dSMatthew Auld 		 * then we should expect the fault to fail.
1374fb87550dSMatthew Auld 		 */
1375fb87550dSMatthew Auld 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1376fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_TOPDOWN |
1377fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_FILL |
1378fb87550dSMatthew Auld 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1379bfe53be2SMatthew Auld 		if (err)
1380bfe53be2SMatthew Auld 			goto out_io_size;
1381bfe53be2SMatthew Auld 
1382bfe53be2SMatthew Auld 		/*
1383bfe53be2SMatthew Auld 		 * Allocate in the non-mappable portion, but force migrating to
1384bfe53be2SMatthew Auld 		 * the mappable portion on fault (LMEM -> LMEM). We then also
1385bfe53be2SMatthew Auld 		 * simulate a gpu error when moving the pages during the
1386bfe53be2SMatthew Auld 		 * fault, which should result in wedging the gpu and
1387bfe53be2SMatthew Auld 		 * returning SIGBUS in the fault handler, since we can't
1388bfe53be2SMatthew Auld 		 * fall back to memcpy.
1389bfe53be2SMatthew Auld 		 */
1390bfe53be2SMatthew Auld 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1391bfe53be2SMatthew Auld 					 IGT_MMAP_MIGRATE_TOPDOWN |
1392bfe53be2SMatthew Auld 					 IGT_MMAP_MIGRATE_FILL |
1393bfe53be2SMatthew Auld 					 IGT_MMAP_MIGRATE_EVICTABLE |
1394bfe53be2SMatthew Auld 					 IGT_MMAP_MIGRATE_FAIL_GPU |
1395bfe53be2SMatthew Auld 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1396fb87550dSMatthew Auld out_io_size:
1397fb87550dSMatthew Auld 		mr->io_size = saved_io_size;
1398fb87550dSMatthew Auld 		i915_ttm_buddy_man_force_visible_size(man,
1399fb87550dSMatthew Auld 						      mr->io_size >> PAGE_SHIFT);
1400fb87550dSMatthew Auld 		if (err)
1401fb87550dSMatthew Auld 			return err;
1402fb87550dSMatthew Auld 	}
1403fb87550dSMatthew Auld 
1404fb87550dSMatthew Auld 	return 0;
1405fb87550dSMatthew Auld }
1406fb87550dSMatthew Auld 
repr_mmap_type(enum i915_mmap_type type)14079f909e21SChris Wilson static const char *repr_mmap_type(enum i915_mmap_type type)
14089f909e21SChris Wilson {
14099f909e21SChris Wilson 	switch (type) {
14109f909e21SChris Wilson 	case I915_MMAP_TYPE_GTT: return "gtt";
14119f909e21SChris Wilson 	case I915_MMAP_TYPE_WB: return "wb";
14129f909e21SChris Wilson 	case I915_MMAP_TYPE_WC: return "wc";
14139f909e21SChris Wilson 	case I915_MMAP_TYPE_UC: return "uc";
14147961c5b6SMaarten Lankhorst 	case I915_MMAP_TYPE_FIXED: return "fixed";
14159f909e21SChris Wilson 	default: return "unknown";
14169f909e21SChris Wilson 	}
14179f909e21SChris Wilson }
14189f909e21SChris Wilson 
can_access(struct drm_i915_gem_object * obj)14190ff37575SThomas Hellström static bool can_access(struct drm_i915_gem_object *obj)
14209f909e21SChris Wilson {
14210ff37575SThomas Hellström 	bool access;
14220ff37575SThomas Hellström 
14230ff37575SThomas Hellström 	i915_gem_object_lock(obj, NULL);
14240ff37575SThomas Hellström 	access = i915_gem_object_has_struct_page(obj) ||
14250ff37575SThomas Hellström 		i915_gem_object_has_iomem(obj);
14260ff37575SThomas Hellström 	i915_gem_object_unlock(obj);
14270ff37575SThomas Hellström 
14280ff37575SThomas Hellström 	return access;
14299f909e21SChris Wilson }
14309f909e21SChris Wilson 
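/*
 * Check that the object can be accessed through the mmap both directly
 * (get_user/put_user) and via access_process_vm(), the path used by
 * ptrace() and /proc/<pid>/mem, and that the two views stay coherent.
 */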
__igt_mmap_access(struct drm_i915_private * i915,struct drm_i915_gem_object * obj,enum i915_mmap_type type)14319f909e21SChris Wilson static int __igt_mmap_access(struct drm_i915_private *i915,
14329f909e21SChris Wilson 			     struct drm_i915_gem_object *obj,
14339f909e21SChris Wilson 			     enum i915_mmap_type type)
14349f909e21SChris Wilson {
14359f909e21SChris Wilson 	unsigned long __user *ptr;
14369f909e21SChris Wilson 	unsigned long A, B;
14379f909e21SChris Wilson 	unsigned long x, y;
14389f909e21SChris Wilson 	unsigned long addr;
14399f909e21SChris Wilson 	int err;
1440cf3e3e86SMaarten Lankhorst 	u64 offset;
14419f909e21SChris Wilson 
14429f909e21SChris Wilson 	memset(&A, 0xAA, sizeof(A));
14439f909e21SChris Wilson 	memset(&B, 0xBB, sizeof(B));
14449f909e21SChris Wilson 
14459f909e21SChris Wilson 	if (!can_mmap(obj, type) || !can_access(obj))
14469f909e21SChris Wilson 		return 0;
14479f909e21SChris Wilson 
1448cf3e3e86SMaarten Lankhorst 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1449cf3e3e86SMaarten Lankhorst 	if (err)
1450cf3e3e86SMaarten Lankhorst 		return err;
14519f909e21SChris Wilson 
1452cf3e3e86SMaarten Lankhorst 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
14539f909e21SChris Wilson 	if (IS_ERR_VALUE(addr))
14549f909e21SChris Wilson 		return addr;
14559f909e21SChris Wilson 	ptr = (unsigned long __user *)addr;
14569f909e21SChris Wilson 
14579f909e21SChris Wilson 	err = __put_user(A, ptr);
14589f909e21SChris Wilson 	if (err) {
14599f909e21SChris Wilson 		pr_err("%s(%s): failed to write into user mmap\n",
14609f909e21SChris Wilson 		       obj->mm.region->name, repr_mmap_type(type));
14619f909e21SChris Wilson 		goto out_unmap;
14629f909e21SChris Wilson 	}
14639f909e21SChris Wilson 
14641a9c4db4SMichał Winiarski 	intel_gt_flush_ggtt_writes(to_gt(i915));
14659f909e21SChris Wilson 
14669f909e21SChris Wilson 	err = access_process_vm(current, addr, &x, sizeof(x), 0);
14679f909e21SChris Wilson 	if (err != sizeof(x)) {
14689f909e21SChris Wilson 		pr_err("%s(%s): access_process_vm() read failed\n",
14699f909e21SChris Wilson 		       obj->mm.region->name, repr_mmap_type(type));
14709f909e21SChris Wilson 		goto out_unmap;
14719f909e21SChris Wilson 	}
14729f909e21SChris Wilson 
14739f909e21SChris Wilson 	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
14749f909e21SChris Wilson 	if (err != sizeof(B)) {
14759f909e21SChris Wilson 		pr_err("%s(%s): access_process_vm() write failed\n",
14769f909e21SChris Wilson 		       obj->mm.region->name, repr_mmap_type(type));
14779f909e21SChris Wilson 		goto out_unmap;
14789f909e21SChris Wilson 	}
14799f909e21SChris Wilson 
14801a9c4db4SMichał Winiarski 	intel_gt_flush_ggtt_writes(to_gt(i915));
14819f909e21SChris Wilson 
14829f909e21SChris Wilson 	err = __get_user(y, ptr);
14839f909e21SChris Wilson 	if (err) {
14849f909e21SChris Wilson 		pr_err("%s(%s): failed to read from user mmap\n",
14859f909e21SChris Wilson 		       obj->mm.region->name, repr_mmap_type(type));
14869f909e21SChris Wilson 		goto out_unmap;
14879f909e21SChris Wilson 	}
14889f909e21SChris Wilson 
14899f909e21SChris Wilson 	if (x != A || y != B) {
14909f909e21SChris Wilson 		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
14919f909e21SChris Wilson 		       obj->mm.region->name, repr_mmap_type(type),
14929f909e21SChris Wilson 		       x, y);
14939f909e21SChris Wilson 		err = -EINVAL;
14949f909e21SChris Wilson 		goto out_unmap;
14959f909e21SChris Wilson 	}
14969f909e21SChris Wilson 
14979f909e21SChris Wilson out_unmap:
14989f909e21SChris Wilson 	vm_munmap(addr, obj->base.size);
14999f909e21SChris Wilson 	return err;
15009f909e21SChris Wilson }
15019f909e21SChris Wilson 
igt_mmap_access(void * arg)15029f909e21SChris Wilson static int igt_mmap_access(void *arg)
15039f909e21SChris Wilson {
15049f909e21SChris Wilson 	struct drm_i915_private *i915 = arg;
15059f909e21SChris Wilson 	struct intel_memory_region *mr;
15069f909e21SChris Wilson 	enum intel_region_id id;
15079f909e21SChris Wilson 
15089f909e21SChris Wilson 	for_each_memory_region(mr, i915, id) {
15099f909e21SChris Wilson 		struct drm_i915_gem_object *obj;
15109f909e21SChris Wilson 		int err;
15119f909e21SChris Wilson 
1512938d2fd1SMatthew Auld 		if (mr->private)
1513938d2fd1SMatthew Auld 			continue;
1514938d2fd1SMatthew Auld 
15156d0e4f07SMatthew Auld 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
15169f909e21SChris Wilson 		if (obj == ERR_PTR(-ENODEV))
15179f909e21SChris Wilson 			continue;
15189f909e21SChris Wilson 
15199f909e21SChris Wilson 		if (IS_ERR(obj))
15209f909e21SChris Wilson 			return PTR_ERR(obj);
15219f909e21SChris Wilson 
15229f909e21SChris Wilson 		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
15239f909e21SChris Wilson 		if (err == 0)
15249f909e21SChris Wilson 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
15259f909e21SChris Wilson 		if (err == 0)
15269f909e21SChris Wilson 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
15279f909e21SChris Wilson 		if (err == 0)
15289f909e21SChris Wilson 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
15297961c5b6SMaarten Lankhorst 		if (err == 0)
15307961c5b6SMaarten Lankhorst 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
15319f909e21SChris Wilson 
15329f909e21SChris Wilson 		i915_gem_object_put(obj);
15339f909e21SChris Wilson 		if (err)
15349f909e21SChris Wilson 			return err;
15359f909e21SChris Wilson 	}
15369f909e21SChris Wilson 
15379f909e21SChris Wilson 	return 0;
15389f909e21SChris Wilson }
15399f909e21SChris Wilson 
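/*
 * Write a MI_BATCH_BUFFER_END into the object through the CPU mmap and
 * then submit the object as a batch buffer on every uabi engine; any
 * coherency failure shows up as the request failing to complete within
 * the short wait below, wedging the GT.
 */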
__igt_mmap_gpu(struct drm_i915_private * i915,struct drm_i915_gem_object * obj,enum i915_mmap_type type)154006581862SChris Wilson static int __igt_mmap_gpu(struct drm_i915_private *i915,
154106581862SChris Wilson 			  struct drm_i915_gem_object *obj,
154206581862SChris Wilson 			  enum i915_mmap_type type)
154306581862SChris Wilson {
154406581862SChris Wilson 	struct intel_engine_cs *engine;
154506581862SChris Wilson 	unsigned long addr;
1546a5799832SChris Wilson 	u32 __user *ux;
1547a5799832SChris Wilson 	u32 bbe;
154806581862SChris Wilson 	int err;
1549cf3e3e86SMaarten Lankhorst 	u64 offset;
155006581862SChris Wilson 
155106581862SChris Wilson 	/*
155206581862SChris Wilson 	 * Verify that the mmap access into the backing store aligns with
155306581862SChris Wilson 	 * that of the GPU, i.e. that mmap is indeed writing into the same
155406581862SChris Wilson 	 * page as being read by the GPU.
155506581862SChris Wilson 	 */
155606581862SChris Wilson 
155706581862SChris Wilson 	if (!can_mmap(obj, type))
155806581862SChris Wilson 		return 0;
155906581862SChris Wilson 
156006581862SChris Wilson 	err = wc_set(obj);
156106581862SChris Wilson 	if (err == -ENXIO)
156206581862SChris Wilson 		err = gtt_set(obj);
156306581862SChris Wilson 	if (err)
156406581862SChris Wilson 		return err;
156506581862SChris Wilson 
1566cf3e3e86SMaarten Lankhorst 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1567cf3e3e86SMaarten Lankhorst 	if (err)
1568cf3e3e86SMaarten Lankhorst 		return err;
156906581862SChris Wilson 
1570cf3e3e86SMaarten Lankhorst 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
157106581862SChris Wilson 	if (IS_ERR_VALUE(addr))
157206581862SChris Wilson 		return addr;
157306581862SChris Wilson 
157406581862SChris Wilson 	ux = u64_to_user_ptr((u64)addr);
157506581862SChris Wilson 	bbe = MI_BATCH_BUFFER_END;
157606581862SChris Wilson 	if (put_user(bbe, ux)) {
157706581862SChris Wilson 		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
157806581862SChris Wilson 		err = -EFAULT;
157906581862SChris Wilson 		goto out_unmap;
158006581862SChris Wilson 	}
158106581862SChris Wilson 
158206581862SChris Wilson 	if (type == I915_MMAP_TYPE_GTT)
15831a9c4db4SMichał Winiarski 		intel_gt_flush_ggtt_writes(to_gt(i915));
158406581862SChris Wilson 
158506581862SChris Wilson 	for_each_uabi_engine(engine, i915) {
158606581862SChris Wilson 		struct i915_request *rq;
158706581862SChris Wilson 		struct i915_vma *vma;
158815b6c924SMaarten Lankhorst 		struct i915_gem_ww_ctx ww;
158906581862SChris Wilson 
159006581862SChris Wilson 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
159106581862SChris Wilson 		if (IS_ERR(vma)) {
159206581862SChris Wilson 			err = PTR_ERR(vma);
159306581862SChris Wilson 			goto out_unmap;
159406581862SChris Wilson 		}
159506581862SChris Wilson 
159615b6c924SMaarten Lankhorst 		i915_gem_ww_ctx_init(&ww, false);
159715b6c924SMaarten Lankhorst retry:
159815b6c924SMaarten Lankhorst 		err = i915_gem_object_lock(obj, &ww);
159915b6c924SMaarten Lankhorst 		if (!err)
160015b6c924SMaarten Lankhorst 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
160106581862SChris Wilson 		if (err)
160215b6c924SMaarten Lankhorst 			goto out_ww;
160306581862SChris Wilson 
160406581862SChris Wilson 		rq = i915_request_create(engine->kernel_context);
160506581862SChris Wilson 		if (IS_ERR(rq)) {
160606581862SChris Wilson 			err = PTR_ERR(rq);
160706581862SChris Wilson 			goto out_unpin;
160806581862SChris Wilson 		}
160906581862SChris Wilson 
161006581862SChris Wilson 		err = i915_vma_move_to_active(vma, rq, 0);
161106581862SChris Wilson 		if (err == 0)
16128e4ee5e8SChris Wilson 			err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
161306581862SChris Wilson 		i915_request_get(rq);
161406581862SChris Wilson 		i915_request_add(rq);
161506581862SChris Wilson 
161606581862SChris Wilson 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
161706581862SChris Wilson 			struct drm_printer p =
161806581862SChris Wilson 				drm_info_printer(engine->i915->drm.dev);
161906581862SChris Wilson 
162006581862SChris Wilson 			pr_err("%s(%s, %s): Failed to execute batch\n",
162106581862SChris Wilson 			       __func__, engine->name, obj->mm.region->name);
162206581862SChris Wilson 			intel_engine_dump(engine, &p,
162306581862SChris Wilson 					  "%s\n", engine->name);
162406581862SChris Wilson 
162506581862SChris Wilson 			intel_gt_set_wedged(engine->gt);
162606581862SChris Wilson 			err = -EIO;
162706581862SChris Wilson 		}
162806581862SChris Wilson 		i915_request_put(rq);
162906581862SChris Wilson 
163006581862SChris Wilson out_unpin:
163106581862SChris Wilson 		i915_vma_unpin(vma);
163215b6c924SMaarten Lankhorst out_ww:
163315b6c924SMaarten Lankhorst 		if (err == -EDEADLK) {
163415b6c924SMaarten Lankhorst 			err = i915_gem_ww_ctx_backoff(&ww);
163515b6c924SMaarten Lankhorst 			if (!err)
163615b6c924SMaarten Lankhorst 				goto retry;
163715b6c924SMaarten Lankhorst 		}
163815b6c924SMaarten Lankhorst 		i915_gem_ww_ctx_fini(&ww);
163906581862SChris Wilson 		if (err)
164006581862SChris Wilson 			goto out_unmap;
164106581862SChris Wilson 	}
164206581862SChris Wilson 
164306581862SChris Wilson out_unmap:
164406581862SChris Wilson 	vm_munmap(addr, obj->base.size);
164506581862SChris Wilson 	return err;
164606581862SChris Wilson }
164706581862SChris Wilson 
igt_mmap_gpu(void * arg)164806581862SChris Wilson static int igt_mmap_gpu(void *arg)
164906581862SChris Wilson {
165006581862SChris Wilson 	struct drm_i915_private *i915 = arg;
165106581862SChris Wilson 	struct intel_memory_region *mr;
165206581862SChris Wilson 	enum intel_region_id id;
165306581862SChris Wilson 
165406581862SChris Wilson 	for_each_memory_region(mr, i915, id) {
165506581862SChris Wilson 		struct drm_i915_gem_object *obj;
165606581862SChris Wilson 		int err;
165706581862SChris Wilson 
1658938d2fd1SMatthew Auld 		if (mr->private)
1659938d2fd1SMatthew Auld 			continue;
1660938d2fd1SMatthew Auld 
16616d0e4f07SMatthew Auld 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
166206581862SChris Wilson 		if (obj == ERR_PTR(-ENODEV))
166306581862SChris Wilson 			continue;
166406581862SChris Wilson 
166506581862SChris Wilson 		if (IS_ERR(obj))
166606581862SChris Wilson 			return PTR_ERR(obj);
166706581862SChris Wilson 
166806581862SChris Wilson 		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
166906581862SChris Wilson 		if (err == 0)
167006581862SChris Wilson 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
16717961c5b6SMaarten Lankhorst 		if (err == 0)
16727961c5b6SMaarten Lankhorst 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
167306581862SChris Wilson 
167406581862SChris Wilson 		i915_gem_object_put(obj);
167506581862SChris Wilson 		if (err)
167606581862SChris Wilson 			return err;
167706581862SChris Wilson 	}
167806581862SChris Wilson 
167906581862SChris Wilson 	return 0;
168006581862SChris Wilson }
168106581862SChris Wilson 
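/*
 * Helpers for __igt_mmap_revoke(): walk the CPU page tables covering the
 * mmap with apply_to_page_range() and check whether every PTE in the
 * range is present (after prefaulting) or has been cleared (after the
 * mapping is revoked).
 */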
check_present_pte(pte_t * pte,unsigned long addr,void * data)16821d1d0af6SChris Wilson static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
16831d1d0af6SChris Wilson {
1684*c33c7948SRyan Roberts 	pte_t ptent = ptep_get(pte);
1685*c33c7948SRyan Roberts 
1686*c33c7948SRyan Roberts 	if (!pte_present(ptent) || pte_none(ptent)) {
16871d1d0af6SChris Wilson 		pr_err("missing PTE:%lx\n",
16881d1d0af6SChris Wilson 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
16891d1d0af6SChris Wilson 		return -EINVAL;
16901d1d0af6SChris Wilson 	}
16911d1d0af6SChris Wilson 
16921d1d0af6SChris Wilson 	return 0;
16931d1d0af6SChris Wilson }
16941d1d0af6SChris Wilson 
check_absent_pte(pte_t * pte,unsigned long addr,void * data)16951d1d0af6SChris Wilson static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
16961d1d0af6SChris Wilson {
1697*c33c7948SRyan Roberts 	pte_t ptent = ptep_get(pte);
1698*c33c7948SRyan Roberts 
1699*c33c7948SRyan Roberts 	if (pte_present(ptent) && !pte_none(ptent)) {
17001d1d0af6SChris Wilson 		pr_err("present PTE:%lx; expected to be revoked\n",
17011d1d0af6SChris Wilson 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
17021d1d0af6SChris Wilson 		return -EINVAL;
17031d1d0af6SChris Wilson 	}
17041d1d0af6SChris Wilson 
17051d1d0af6SChris Wilson 	return 0;
17061d1d0af6SChris Wilson }
17071d1d0af6SChris Wilson 
check_present(unsigned long addr,unsigned long len)17081d1d0af6SChris Wilson static int check_present(unsigned long addr, unsigned long len)
17091d1d0af6SChris Wilson {
17101d1d0af6SChris Wilson 	return apply_to_page_range(current->mm, addr, len,
17111d1d0af6SChris Wilson 				   check_present_pte, (void *)addr);
17121d1d0af6SChris Wilson }
17131d1d0af6SChris Wilson 
check_absent(unsigned long addr,unsigned long len)17141d1d0af6SChris Wilson static int check_absent(unsigned long addr, unsigned long len)
17151d1d0af6SChris Wilson {
17161d1d0af6SChris Wilson 	return apply_to_page_range(current->mm, addr, len,
17171d1d0af6SChris Wilson 				   check_absent_pte, (void *)addr);
17181d1d0af6SChris Wilson }
17191d1d0af6SChris Wilson 
prefault_range(u64 start,u64 len)17201d1d0af6SChris Wilson static int prefault_range(u64 start, u64 len)
17211d1d0af6SChris Wilson {
17221d1d0af6SChris Wilson 	const char __user *addr, *end;
17231d1d0af6SChris Wilson 	char __maybe_unused c;
17241d1d0af6SChris Wilson 	int err;
17251d1d0af6SChris Wilson 
17261d1d0af6SChris Wilson 	addr = u64_to_user_ptr(start);
17271d1d0af6SChris Wilson 	end = addr + len;
17281d1d0af6SChris Wilson 
17291d1d0af6SChris Wilson 	for (; addr < end; addr += PAGE_SIZE) {
17301d1d0af6SChris Wilson 		err = __get_user(c, addr);
17311d1d0af6SChris Wilson 		if (err)
17321d1d0af6SChris Wilson 			return err;
17331d1d0af6SChris Wilson 	}
17341d1d0af6SChris Wilson 
17351d1d0af6SChris Wilson 	return __get_user(c, end - 1);
17361d1d0af6SChris Wilson }
17371d1d0af6SChris Wilson 
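/*
 * Prefault the entire mmap, verify the PTEs are present, then unbind the
 * object (and for non-GTT mmaps also drop its pages) and verify that the
 * userspace PTEs have been revoked.
 */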
__igt_mmap_revoke(struct drm_i915_private * i915,struct drm_i915_gem_object * obj,enum i915_mmap_type type)17389771d5f7SAbdiel Janulgue static int __igt_mmap_revoke(struct drm_i915_private *i915,
17399771d5f7SAbdiel Janulgue 			     struct drm_i915_gem_object *obj,
17409771d5f7SAbdiel Janulgue 			     enum i915_mmap_type type)
17411d1d0af6SChris Wilson {
17421d1d0af6SChris Wilson 	unsigned long addr;
17431d1d0af6SChris Wilson 	int err;
1744cf3e3e86SMaarten Lankhorst 	u64 offset;
17451d1d0af6SChris Wilson 
17469771d5f7SAbdiel Janulgue 	if (!can_mmap(obj, type))
17471d1d0af6SChris Wilson 		return 0;
17481d1d0af6SChris Wilson 
1749cf3e3e86SMaarten Lankhorst 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1750cf3e3e86SMaarten Lankhorst 	if (err)
1751cf3e3e86SMaarten Lankhorst 		return err;
17521d1d0af6SChris Wilson 
1753cf3e3e86SMaarten Lankhorst 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
17549771d5f7SAbdiel Janulgue 	if (IS_ERR_VALUE(addr))
17559771d5f7SAbdiel Janulgue 		return addr;
17561d1d0af6SChris Wilson 
17571d1d0af6SChris Wilson 	err = prefault_range(addr, obj->base.size);
17581d1d0af6SChris Wilson 	if (err)
17591d1d0af6SChris Wilson 		goto out_unmap;
17601d1d0af6SChris Wilson 
17611d1d0af6SChris Wilson 	err = check_present(addr, obj->base.size);
17629771d5f7SAbdiel Janulgue 	if (err) {
17639771d5f7SAbdiel Janulgue 		pr_err("%s: was not present\n", obj->mm.region->name);
17641d1d0af6SChris Wilson 		goto out_unmap;
17659771d5f7SAbdiel Janulgue 	}
17661d1d0af6SChris Wilson 
17671d1d0af6SChris Wilson 	/*
17681d1d0af6SChris Wilson 	 * After unbinding the object from the GGTT, its address may be reused
17691d1d0af6SChris Wilson 	 * for other objects. Ergo we have to revoke the previous mmap PTE
17701d1d0af6SChris Wilson 	 * access as it no longer points to the same object.
17711d1d0af6SChris Wilson 	 */
17720f341974SMaarten Lankhorst 	i915_gem_object_lock(obj, NULL);
17731d1d0af6SChris Wilson 	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
17740f341974SMaarten Lankhorst 	i915_gem_object_unlock(obj);
17751d1d0af6SChris Wilson 	if (err) {
17761d1d0af6SChris Wilson 		pr_err("Failed to unbind object!\n");
17771d1d0af6SChris Wilson 		goto out_unmap;
17781d1d0af6SChris Wilson 	}
17791d1d0af6SChris Wilson 
1780cc662126SAbdiel Janulgue 	if (type != I915_MMAP_TYPE_GTT) {
17816f791ffeSMaarten Lankhorst 		i915_gem_object_lock(obj, NULL);
1782cc662126SAbdiel Janulgue 		__i915_gem_object_put_pages(obj);
17836f791ffeSMaarten Lankhorst 		i915_gem_object_unlock(obj);
1784cc662126SAbdiel Janulgue 		if (i915_gem_object_has_pages(obj)) {
1785cc662126SAbdiel Janulgue 			pr_err("Failed to put-pages object!\n");
1786cc662126SAbdiel Janulgue 			err = -EINVAL;
1787cc662126SAbdiel Janulgue 			goto out_unmap;
1788cc662126SAbdiel Janulgue 		}
1789cc662126SAbdiel Janulgue 	}
1790cc662126SAbdiel Janulgue 
17911d1d0af6SChris Wilson 	err = check_absent(addr, obj->base.size);
17929771d5f7SAbdiel Janulgue 	if (err) {
17939771d5f7SAbdiel Janulgue 		pr_err("%s: was not absent\n", obj->mm.region->name);
17941d1d0af6SChris Wilson 		goto out_unmap;
17959771d5f7SAbdiel Janulgue 	}
17961d1d0af6SChris Wilson 
17971d1d0af6SChris Wilson out_unmap:
17981d1d0af6SChris Wilson 	vm_munmap(addr, obj->base.size);
17991d1d0af6SChris Wilson 	return err;
18001d1d0af6SChris Wilson }
18011d1d0af6SChris Wilson 
igt_mmap_revoke(void * arg)18029771d5f7SAbdiel Janulgue static int igt_mmap_revoke(void *arg)
1803cc662126SAbdiel Janulgue {
18049771d5f7SAbdiel Janulgue 	struct drm_i915_private *i915 = arg;
18059771d5f7SAbdiel Janulgue 	struct intel_memory_region *mr;
18069771d5f7SAbdiel Janulgue 	enum intel_region_id id;
18079771d5f7SAbdiel Janulgue 
18089771d5f7SAbdiel Janulgue 	for_each_memory_region(mr, i915, id) {
18099771d5f7SAbdiel Janulgue 		struct drm_i915_gem_object *obj;
18109771d5f7SAbdiel Janulgue 		int err;
18119771d5f7SAbdiel Janulgue 
1812938d2fd1SMatthew Auld 		if (mr->private)
1813938d2fd1SMatthew Auld 			continue;
1814938d2fd1SMatthew Auld 
18156d0e4f07SMatthew Auld 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
18169771d5f7SAbdiel Janulgue 		if (obj == ERR_PTR(-ENODEV))
18179771d5f7SAbdiel Janulgue 			continue;
18189771d5f7SAbdiel Janulgue 
18199771d5f7SAbdiel Janulgue 		if (IS_ERR(obj))
18209771d5f7SAbdiel Janulgue 			return PTR_ERR(obj);
18219771d5f7SAbdiel Janulgue 
18229771d5f7SAbdiel Janulgue 		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
18239771d5f7SAbdiel Janulgue 		if (err == 0)
18249771d5f7SAbdiel Janulgue 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
18257961c5b6SMaarten Lankhorst 		if (err == 0)
18267961c5b6SMaarten Lankhorst 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
18279771d5f7SAbdiel Janulgue 
18289771d5f7SAbdiel Janulgue 		i915_gem_object_put(obj);
18299771d5f7SAbdiel Janulgue 		if (err)
18309771d5f7SAbdiel Janulgue 			return err;
1831cc662126SAbdiel Janulgue 	}
1832cc662126SAbdiel Janulgue 
18339771d5f7SAbdiel Janulgue 	return 0;
1834cc662126SAbdiel Janulgue }
1835cc662126SAbdiel Janulgue 
i915_gem_mman_live_selftests(struct drm_i915_private * i915)1836b414fcd5SChris Wilson int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1837b414fcd5SChris Wilson {
1838b414fcd5SChris Wilson 	static const struct i915_subtest tests[] = {
1839b414fcd5SChris Wilson 		SUBTEST(igt_partial_tiling),
184007e98eb0SChris Wilson 		SUBTEST(igt_smoke_tiling),
1841b414fcd5SChris Wilson 		SUBTEST(igt_mmap_offset_exhaustion),
18429771d5f7SAbdiel Janulgue 		SUBTEST(igt_mmap),
1843fb87550dSMatthew Auld 		SUBTEST(igt_mmap_migrate),
18449f909e21SChris Wilson 		SUBTEST(igt_mmap_access),
18459771d5f7SAbdiel Janulgue 		SUBTEST(igt_mmap_revoke),
184606581862SChris Wilson 		SUBTEST(igt_mmap_gpu),
1847b414fcd5SChris Wilson 	};
1848b414fcd5SChris Wilson 
184961faec5fSMatthew Brost 	return i915_live_subtests(tests, i915);
1850b414fcd5SChris Wilson }
1851