/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

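/*
 * Registers that are effectively write-only on the listed platform and
 * so cannot be verified by reading them back; check_dirty_whitelist()
 * skips them via wo_register() below.
 */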
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

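/*
 * Allocate a scratch page, poison it with 0xc5 and emit one SRM per
 * RING_FORCE_TO_NONPRIV slot into a request on @engine, dumping the
 * current contents of every slot into that page. The caller inspects
 * (and releases) the returned object once the request has completed.
 */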
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

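/*
 * Compare the slots dumped by read_nonprivs() against what we expect:
 * slots covered by the engine's whitelist should hold the whitelisted
 * entry, and the remaining slots should point at RING_NOPID (see
 * get_whitelist_reg()).
 */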
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(&ctx->i915->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

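/*
 * Keep the engine busy with a spinner submitted from a throwaway
 * kernel context, so that the reset is performed while a different
 * ("scratch") context is active.
 */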
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);

	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

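/*
 * Verify the RING_NONPRIV whitelist before the reset, again in the
 * same context after resetting while a scratch context spins, and
 * finally in a freshly created context.
 */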
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, i915);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

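/*
 * Model the effect of writing @new over @old for a register with
 * reserved bits @rsvd. rsvd == 0x0000ffff denotes the usual masked
 * register convention, where the upper 16 bits select which of the
 * lower 16 bits take effect: e.g. writing 0x00010001 sets bit 0 and
 * writing 0x00010000 clears it. Any other rsvd is treated as a plain
 * mask of writable bits.
 */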
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

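/* Count the whitelist entries that are not marked read-only. */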
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

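/*
 * For every writable whitelisted register, write a series of garbage
 * values from an unprivileged batch, read the register back after each
 * write, and compare the readback against what reg_write() predicts
 * should have stuck. The original value is saved up front and restored
 * at the end so that no garbage is left behind in the context.
 */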
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(&ctx->i915->gt);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(&i915->gt);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(&i915->gt);
	return err;
}

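/*
 * Dump the current value of every whitelisted register into @results,
 * one SRM per entry, emitted from a request on @ctx and @engine.
 */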
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear access permission field */
		reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

	return err;
}

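/*
 * From an unprivileged "user" batch, write 0xffffffff to every
 * writable whitelisted register, dirtying the values seen by @ctx.
 */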
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

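/*
 * Compare two register dumps entry by entry with the supplied
 * predicate (result_eq: values must match, result_neq: values must
 * differ), skipping entries marked read-only.
 */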
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but
	 * is invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		client[i].scratch[0] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
	}

	for_each_engine(engine, i915, id) {
		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1; we expect these to be the defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	return err;
}

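/*
 * Check that the global GT workarounds and each engine's (and each
 * context's) workaround lists captured by reference_lists_init() still
 * match what the hardware reports.
 */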
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}
	i915_gem_context_unlock_engines(ctx);

	return ok;
}

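/*
 * A full GPU reset should not clobber the workarounds applied at init:
 * verify the reference lists both before and after intel_gt_reset().
 */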
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);

	return ok ? 0 : -ESRCH;
}

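/*
 * As above, but for per-engine resets: verify the workaround lists
 * after resetting an idle engine, and again after resetting an engine
 * that is busy running a spinner.
 */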
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	enum intel_engine_id id;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	for_each_engine(engine, i915, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, i915);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}

err:
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};
	int err;

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}