/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

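/*
 * Submit @rq and wait briefly for it to complete, reporting -EIO if the
 * request does not retire in time.
 */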
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

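/*
 * Submit @rq and wait for the attached spinner (if any) to start executing
 * on the GPU before returning.
 */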
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

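/*
 * Emit one MI_STORE_REGISTER_MEM per RING_FORCE_TO_NONPRIV slot so the
 * current contents of the whitelist slots are captured into a freshly
 * allocated scratch object, returned to the caller for inspection.
 */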
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

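/*
 * Read back every RING_FORCE_TO_NONPRIV slot on @engine and compare it
 * with the expected whitelist entry (or RING_NOPID for unused slots).
 */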
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

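/*
 * Verify the whitelist before a reset, perform the requested reset while
 * a scratch context spins on the engine, then check that the whitelist
 * survives both in the original context and in a freshly created one.
 */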
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

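/*
 * Predict the value of a register after a write: rsvd is the read-back of
 * an all-ones write, i.e. the set of writable bits. The special value
 * 0x0000ffff denotes a masked register, where the upper 16 bits of the
 * write select which of the lower 16 bits are updated.
 */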
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

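/*
 * From a user batch, write a series of test patterns to each whitelisted
 * register, read every result back via SRM and compare it with the value
 * predicted by reg_write(), restoring the original register value
 * afterwards so no garbage is left in the context.
 */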
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		i915_vma_lock(scratch);
		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(scratch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

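/*
 * Store the current value of every whitelisted register into @results,
 * using SRM commands emitted from @ctx on @engine.
 */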
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

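/*
 * From an unprivileged "user" batch in @ctx, write 0xffffffff to every
 * writable whitelisted register on @engine, dirtying the context image.
 */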
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	vm = i915_gem_context_get_vm_rcu(ctx);
	batch = create_batch(vm);
	i915_vm_put(vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, leaving our writes unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(gt->i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, gt, id) {
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

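/*
 * Check the current hardware state against the reference GT, engine and
 * context workaround lists captured by reference_lists_init().
 */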
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kernel_context_close(ctx);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}