1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "gt/intel_engine_pm.h"
7 #include "gt/intel_gpu_commands.h"
8 #include "i915_selftest.h"
9 
10 #include "gem/selftests/mock_context.h"
11 #include "selftests/igt_reset.h"
12 #include "selftests/igt_spinner.h"
13 
/*
 * State shared by the live MOCS selftests: the expected control tables
 * (filled in from get_mocs_settings()) and a scratch buffer into which
 * the CS copies the register values for comparison.
 */
struct live_mocs {
	struct drm_i915_mocs_table mocs; /* set if HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS */
	struct drm_i915_mocs_table l3cc; /* set if HAS_RENDER_L3CC */
	struct i915_vma *scratch; /* GGTT scratch page written by SRM */
	void *vaddr; /* WB CPU mapping of the scratch page */
};
20 
21 static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
22 {
23 	struct intel_context *ce;
24 
25 	ce = intel_context_create(engine);
26 	if (IS_ERR(ce))
27 		return ce;
28 
29 	/* We build large requests to read the registers from the ring */
30 	ce->ring = __intel_context_ring_size(SZ_16K);
31 
32 	return ce;
33 }
34 
35 static int request_add_sync(struct i915_request *rq, int err)
36 {
37 	i915_request_get(rq);
38 	i915_request_add(rq);
39 	if (i915_request_wait(rq, 0, HZ / 5) < 0)
40 		err = -ETIME;
41 	i915_request_put(rq);
42 
43 	return err;
44 }
45 
46 static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
47 {
48 	int err = 0;
49 
50 	i915_request_get(rq);
51 	i915_request_add(rq);
52 	if (spin && !igt_wait_for_spinner(spin, rq))
53 		err = -ETIME;
54 	i915_request_put(rq);
55 
56 	return err;
57 }
58 
59 static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
60 {
61 	struct drm_i915_mocs_table table;
62 	unsigned int flags;
63 	int err;
64 
65 	memset(arg, 0, sizeof(*arg));
66 
67 	flags = get_mocs_settings(gt->i915, &table);
68 	if (!flags)
69 		return -EINVAL;
70 
71 	if (flags & HAS_RENDER_L3CC)
72 		arg->l3cc = table;
73 
74 	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
75 		arg->mocs = table;
76 
77 	arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
78 	if (IS_ERR(arg->scratch))
79 		return PTR_ERR(arg->scratch);
80 
81 	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
82 	if (IS_ERR(arg->vaddr)) {
83 		err = PTR_ERR(arg->vaddr);
84 		goto err_scratch;
85 	}
86 
87 	return 0;
88 
89 err_scratch:
90 	i915_vma_unpin_and_release(&arg->scratch, 0);
91 	return err;
92 }
93 
/* Release the scratch vma, dropping the CPU map taken in live_mocs_init(). */
static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}
98 
99 static int read_regs(struct i915_request *rq,
100 		     u32 addr, unsigned int count,
101 		     u32 *offset)
102 {
103 	unsigned int i;
104 	u32 *cs;
105 
106 	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
107 
108 	cs = intel_ring_begin(rq, 4 * count);
109 	if (IS_ERR(cs))
110 		return PTR_ERR(cs);
111 
112 	for (i = 0; i < count; i++) {
113 		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
114 		*cs++ = addr;
115 		*cs++ = *offset;
116 		*cs++ = 0;
117 
118 		addr += sizeof(u32);
119 		*offset += sizeof(u32);
120 	}
121 
122 	intel_ring_advance(rq, cs);
123 
124 	return 0;
125 }
126 
127 static int read_mocs_table(struct i915_request *rq,
128 			   const struct drm_i915_mocs_table *table,
129 			   u32 *offset)
130 {
131 	u32 addr;
132 
133 	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
134 		addr = global_mocs_offset();
135 	else
136 		addr = mocs_offset(rq->engine);
137 
138 	return read_regs(rq, addr, table->n_entries, offset);
139 }
140 
141 static int read_l3cc_table(struct i915_request *rq,
142 			   const struct drm_i915_mocs_table *table,
143 			   u32 *offset)
144 {
145 	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
146 
147 	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
148 }
149 
150 static int check_mocs_table(struct intel_engine_cs *engine,
151 			    const struct drm_i915_mocs_table *table,
152 			    u32 **vaddr)
153 {
154 	unsigned int i;
155 	u32 expect;
156 
157 	for_each_mocs(expect, table, i) {
158 		if (**vaddr != expect) {
159 			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
160 			       engine->name, i, **vaddr, expect);
161 			return -EINVAL;
162 		}
163 		++*vaddr;
164 	}
165 
166 	return 0;
167 }
168 
169 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
170 {
171 	/*
172 	 * Registers in this range are affected by the MCR selector
173 	 * which only controls CPU initiated MMIO. Routing does not
174 	 * work for CS access so we cannot verify them on this path.
175 	 */
176 	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
177 }
178 
179 static int check_l3cc_table(struct intel_engine_cs *engine,
180 			    const struct drm_i915_mocs_table *table,
181 			    u32 **vaddr)
182 {
183 	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
184 	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
185 	unsigned int i;
186 	u32 expect;
187 
188 	for_each_l3cc(expect, table, i) {
189 		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
190 			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
191 			       engine->name, i, **vaddr, expect);
192 			return -EINVAL;
193 		}
194 		++*vaddr;
195 		reg += 4;
196 	}
197 
198 	return 0;
199 }
200 
201 static int check_mocs_engine(struct live_mocs *arg,
202 			     struct intel_context *ce)
203 {
204 	struct i915_vma *vma = arg->scratch;
205 	struct i915_request *rq;
206 	u32 offset;
207 	u32 *vaddr;
208 	int err;
209 
210 	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
211 
212 	rq = intel_context_create_request(ce);
213 	if (IS_ERR(rq))
214 		return PTR_ERR(rq);
215 
216 	i915_vma_lock(vma);
217 	err = i915_request_await_object(rq, vma->obj, true);
218 	if (!err)
219 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
220 	i915_vma_unlock(vma);
221 
222 	/* Read the mocs tables back using SRM */
223 	offset = i915_ggtt_offset(vma);
224 	if (!err)
225 		err = read_mocs_table(rq, &arg->mocs, &offset);
226 	if (!err && ce->engine->class == RENDER_CLASS)
227 		err = read_l3cc_table(rq, &arg->l3cc, &offset);
228 	offset -= i915_ggtt_offset(vma);
229 	GEM_BUG_ON(offset > PAGE_SIZE);
230 
231 	err = request_add_sync(rq, err);
232 	if (err)
233 		return err;
234 
235 	/* Compare the results against the expected tables */
236 	vaddr = arg->vaddr;
237 	if (!err)
238 		err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
239 	if (!err && ce->engine->class == RENDER_CLASS)
240 		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
241 	if (err)
242 		return err;
243 
244 	GEM_BUG_ON(arg->vaddr + offset != vaddr);
245 	return 0;
246 }
247 
248 static int live_mocs_kernel(void *arg)
249 {
250 	struct intel_gt *gt = arg;
251 	struct intel_engine_cs *engine;
252 	enum intel_engine_id id;
253 	struct live_mocs mocs;
254 	int err;
255 
256 	/* Basic check the system is configured with the expected mocs table */
257 
258 	err = live_mocs_init(&mocs, gt);
259 	if (err)
260 		return err;
261 
262 	for_each_engine(engine, gt, id) {
263 		intel_engine_pm_get(engine);
264 		err = check_mocs_engine(&mocs, engine->kernel_context);
265 		intel_engine_pm_put(engine);
266 		if (err)
267 			break;
268 	}
269 
270 	live_mocs_fini(&mocs);
271 	return err;
272 }
273 
274 static int live_mocs_clean(void *arg)
275 {
276 	struct intel_gt *gt = arg;
277 	struct intel_engine_cs *engine;
278 	enum intel_engine_id id;
279 	struct live_mocs mocs;
280 	int err;
281 
282 	/* Every new context should see the same mocs table */
283 
284 	err = live_mocs_init(&mocs, gt);
285 	if (err)
286 		return err;
287 
288 	for_each_engine(engine, gt, id) {
289 		struct intel_context *ce;
290 
291 		ce = mocs_context_create(engine);
292 		if (IS_ERR(ce)) {
293 			err = PTR_ERR(ce);
294 			break;
295 		}
296 
297 		err = check_mocs_engine(&mocs, ce);
298 		intel_context_put(ce);
299 		if (err)
300 			break;
301 	}
302 
303 	live_mocs_fini(&mocs);
304 	return err;
305 }
306 
307 static int active_engine_reset(struct intel_context *ce,
308 			       const char *reason)
309 {
310 	struct igt_spinner spin;
311 	struct i915_request *rq;
312 	int err;
313 
314 	err = igt_spinner_init(&spin, ce->engine->gt);
315 	if (err)
316 		return err;
317 
318 	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
319 	if (IS_ERR(rq)) {
320 		igt_spinner_fini(&spin);
321 		return PTR_ERR(rq);
322 	}
323 
324 	err = request_add_spin(rq, &spin);
325 	if (err == 0)
326 		err = intel_engine_reset(ce->engine, reason);
327 
328 	igt_spinner_end(&spin);
329 	igt_spinner_fini(&spin);
330 
331 	return err;
332 }
333 
/*
 * Verify the mocs setup on @ce survives the reset paths available on
 * this gt: an idle per-engine reset, a per-engine reset with an active
 * spinner, and finally a full-gt reset. After each reset the tables are
 * re-read and compared. Returns 0 or the first error encountered.
 */
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	struct intel_gt *gt = ce->engine->gt;
	int err;

	if (intel_has_reset_engine(gt)) {
		/* Reset an idle engine, then check the tables survived. */
		err = intel_engine_reset(ce->engine, "mocs");
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;

		/* Repeat with a reset while the engine is busy spinning. */
		err = active_engine_reset(ce, "mocs");
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	if (intel_has_gpu_reset(gt)) {
		/* Full device reset of the engines in ce->engine->mask. */
		intel_gt_reset(gt, ce->engine->mask, "mocs");

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	return 0;
}
368 
369 static int live_mocs_reset(void *arg)
370 {
371 	struct intel_gt *gt = arg;
372 	struct intel_engine_cs *engine;
373 	enum intel_engine_id id;
374 	struct live_mocs mocs;
375 	int err = 0;
376 
377 	/* Check the mocs setup is retained over per-engine and global resets */
378 
379 	err = live_mocs_init(&mocs, gt);
380 	if (err)
381 		return err;
382 
383 	igt_global_reset_lock(gt);
384 	for_each_engine(engine, gt, id) {
385 		struct intel_context *ce;
386 
387 		ce = mocs_context_create(engine);
388 		if (IS_ERR(ce)) {
389 			err = PTR_ERR(ce);
390 			break;
391 		}
392 
393 		intel_engine_pm_get(engine);
394 		err = __live_mocs_reset(&mocs, ce);
395 		intel_engine_pm_put(engine);
396 
397 		intel_context_put(ce);
398 		if (err)
399 			break;
400 	}
401 	igt_global_reset_unlock(gt);
402 
403 	live_mocs_fini(&mocs);
404 	return err;
405 }
406 
/*
 * Entry point for the live mocs selftests. Skips (returns 0) when the
 * platform has no mocs table to verify; otherwise runs each subtest
 * against the device's gt.
 */
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	/* No mocs settings for this device => nothing to test. */
	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
421