/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

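/*
 * Snapshot reference copies of the GT, engine and context workaround
 * lists so they can later be compared against what the hardware
 * actually contains, e.g. after a reset.
 */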
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

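/*
 * Submit a request that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot for @engine into a freshly allocated
 * object, so the whitelist programmed into the hardware can be
 * inspected from the CPU.
 */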
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

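/*
 * Read back the RING_FORCE_TO_NONPRIV slots and compare each one
 * against the register we expect in that slot (unused slots should
 * hold RING_NOPID).
 */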
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(&ctx->i915->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

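/*
 * Submit a spinning batch from a throwaway kernel context so that the
 * engine is busy executing someone else's work when the reset is
 * triggered.
 */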
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	GEM_BUG_ON(IS_ERR(ce));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
		rq = igt_spinner_create_request(spin, ce, MI_NOOP);

	intel_context_put(ce);
	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

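/*
 * Verify that the RING_NONPRIV whitelist is intact before a reset,
 * that it survives the reset in the original context, and that it is
 * correctly (re)applied for a context created after the reset.
 */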
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

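/*
 * Apply a write of @new to the current value @old, honouring the
 * register's writable mask: rsvd == 0x0000ffff marks a masked
 * register, where the upper 16 bits of the write select which of the
 * lower 16 bits are updated; otherwise rsvd is a plain bitmask of the
 * writable bits.
 */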
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

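/*
 * From an unprivileged batch, write a series of test patterns to each
 * writable whitelisted register with MI_LOAD_REGISTER_IMM, read the
 * register back after every write with MI_STORE_REGISTER_MEM, and then
 * check the readbacks against the value expected once write masking
 * has been taken into account. The original register value is restored
 * at the end of the batch.
 */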
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(&ctx->i915->gt);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the GPU, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(&i915->gt);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(&i915->gt);
	return err;
}

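/*
 * Use MI_STORE_REGISTER_MEM from @ctx to dump the current value of
 * every whitelisted register on @engine into @results.
 */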
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear access permission field */
		reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

	return err;
}

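/*
 * From an unprivileged batch in @ctx, write 0xffffffff to every
 * writable whitelisted register on @engine, dirtying the values so a
 * second context can later be checked for isolation.
 */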
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%04x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

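/*
 * Compare two register dumps (A and B) slot by slot, skipping
 * read-only entries, and use @fn to judge whether each pair matches
 * the expected relationship (equal or not equal).
 */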
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		client[i].scratch[0] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
	}

	for_each_engine(engine, i915, id) {
		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1; we expect these to be the defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	return err;
}

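/*
 * Check the reference workaround lists against what the hardware
 * currently holds: the global GT list via the uncore, and the engine
 * and context lists via each engine bound to @ctx.
 */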
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

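/*
 * Check that the GT, engine and context workarounds are still applied
 * after a full GPU reset.
 */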
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);

	return ok ? 0 : -ESRCH;
}

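/*
 * Check that the workarounds are still applied after an engine reset,
 * both while the engine is idle and while it is busy running a
 * spinner.
 */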
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};
	int err;

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}