/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"

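/*
 * Describes one fence/tiling configuration under test: tile width in
 * bytes and height in rows, log2 of the tile size in bytes, the object
 * stride in bytes, and the tiling and bit-6 swizzle modes.
 */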
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

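/*
 * Extract address bit @bit from @offset and shift it down to bit 6.
 * XORing the result into an address flips bit 6 based on the higher
 * address bits, mimicking the hardware bit-6 swizzling applied by the
 * I915_BIT_6_SWIZZLE_* modes handled below.
 */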
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

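/*
 * Software model of the fence: map a byte offset in the linear (fenced
 * GTT) view of the object onto the byte offset within its backing
 * pages where the hardware stores it, by resolving the tile row and
 * column and then applying the platform's bit-6 swizzle.
 */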
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

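/*
 * Write a unique value through a partial GGTT view of each prime page
 * of the object, then kmap the backing page that tiled_offset()
 * predicts and verify the write landed there. A mismatch indicates a
 * broken partial-view offset, fence or swizzle calculation.
 */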
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	if (igt_timeout(end_time,
			"%s: timed out before tiling=%d stride=%d\n",
			__func__, tile->tiling, tile->stride))
		return -EINTR;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_vma_destroy(vma);
	}

	return 0;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, and with the
	 * expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mapping(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->mm.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->mm.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (INTEL_GEN(i915) <= 2) {
			tile.height = 16;
			tile.width = 128;
			tile.size = 11;
		} else if (tile.tiling == I915_TILING_Y &&
			   HAS_128_BYTE_Y_TILING(i915)) {
			tile.height = 32;
			tile.width = 128;
			tile.size = 12;
		} else {
			tile.height = 8;
			tile.width = 512;
			tile.size = 12;
		}

		if (INTEL_GEN(i915) < 4)
			max_pitch = 8192 / tile.width;
		else if (INTEL_GEN(i915) < 7)
			max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
		else
			max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mapping(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

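/*
 * Keep the object busy by marking its GGTT vma active on a request from
 * every engine, then drop the caller's reference: the object lives on
 * only via its active reference (and is released even on error, so the
 * caller must not put it again).
 */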
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		i915_vma_lock(vma);
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
		i915_vma_unlock(vma);

		i915_request_add(rq);
		if (err)
			break;
	}

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj); /* leave it only alive via its active ref */

	return err;
}

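/*
 * Try to create a mmap offset (fault-handler node) for a fresh object
 * of @size bytes and report whether the result matches @expected.
 */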
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) /* don't mistake a stray errno for success */
		return false;

	err = create_mmap_offset(obj);
	i915_gem_object_put(obj);

	return err == expected;
}

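/*
 * Park the shrinker and the retire/idle workers so that nothing reaps
 * objects behind the test's back; the GT wakeref keeps the device from
 * idling while they are disabled.
 */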
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_shrinker_unregister(i915);

	intel_gt_pm_get(&i915->gt);

	cancel_delayed_work_sync(&i915->gem.retire_work);
	flush_work(&i915->gem.idle_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	intel_gt_pm_put(&i915->gt);

	mutex_lock(&i915->drm.struct_mutex);
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_shrinker_register(i915);
}
403b414fcd5SChris Wilson 
404f63dfc14SChris Wilson static void mmap_offset_lock(struct drm_i915_private *i915)
405f63dfc14SChris Wilson 	__acquires(&i915->drm.vma_offset_manager->vm_lock)
406f63dfc14SChris Wilson {
407f63dfc14SChris Wilson 	write_lock(&i915->drm.vma_offset_manager->vm_lock);
408f63dfc14SChris Wilson }
409f63dfc14SChris Wilson 
410f63dfc14SChris Wilson static void mmap_offset_unlock(struct drm_i915_private *i915)
411f63dfc14SChris Wilson 	__releases(&i915->drm.vma_offset_manager->vm_lock)
412f63dfc14SChris Wilson {
413f63dfc14SChris Wilson 	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
414f63dfc14SChris Wilson }
415f63dfc14SChris Wilson 
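/*
 * Trim the mmap offset space down to a single page, check the edge
 * cases of filling it exactly and overfilling it, then fill the hole
 * with busy-but-dead objects whose offsets we expect the driver to
 * reap on demand.
 */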
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node resv, *hole;
	u64 hole_start, hole_end;
	int loop, err;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);

	/* Trim the device mmap space to only a page */
	memset(&resv, 0, sizeof(resv));
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		resv.start = hole_start;
		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
		mmap_offset_lock(i915);
		err = drm_mm_reserve_node(mm, &resv);
		mmap_offset_unlock(i915);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			goto out_park;
		}
		break;
	}

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	err = create_mmap_offset(obj);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (i915_terminally_wedged(i915))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		mutex_lock(&i915->drm.struct_mutex);
		err = make_obj_busy(obj);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err) {
			/* make_obj_busy() consumed our object reference */
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto out;
		}
	}

out:
	mmap_offset_lock(i915);
	drm_mm_remove_node(&resv);
	mmap_offset_unlock(i915);
out_park:
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
	};

	return i915_subtests(tests, i915);
}