/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

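/*
 * Small submission helpers: request_add_sync() queues @rq and waits a
 * short while for it to complete, folding a timeout into @err as -EIO,
 * while request_add_spin() queues @rq and instead waits for the
 * associated spinner batch to start executing. Both take their own
 * reference so the request stays valid across i915_request_add().
 */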
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

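/*
 * Capture reference copies of the GT, engine and context workaround
 * lists as they would be built at init time, so the tests below can
 * verify that the same values are still applied in hardware after a
 * reset.
 */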
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

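/*
 * Emit a request on @ce that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot of the engine into a scratch object
 * (pre-poisoned with 0xc5), and hand that object back to the caller
 * so the whitelist currently programmed in hardware can be inspected.
 */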
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

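/*
 * Compare the RING_NONPRIV slots read back from hardware with the
 * whitelist we expect for @ce->engine; slots beyond the whitelist
 * count are expected to hold the default RING_NOPID value.
 */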
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

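/*
 * Keep the engine busy with a spinner running in a throwaway context,
 * so that the subsequent reset is taken while some other context is
 * active rather than the one whose whitelist we are checking.
 */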
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

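/*
 * Verify the whitelist before a reset, spin up a scratch context,
 * perform the requested reset, then verify the whitelist again both in
 * the original context and in a freshly created one.
 */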
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

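/*
 * Model the effect of writing @new to a register whose writable bits
 * are described by @rsvd. The special value 0x0000ffff is treated as a
 * masked register, where the upper 16 bits of the write select which
 * of the lower 16 bits are actually updated.
 */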
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

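/*
 * Report whether @reg is effectively write-only, either because its
 * NONPRIV access field says so or because it appears in the
 * per-platform wo_registers[] table above.
 */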
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
	switch (reg) {
	case 0x358:
	case 0x35c:
	case 0x3a8:
		return true;

	default:
		return false;
	}
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

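/*
 * For each whitelisted register, build an unprivileged batch that saves
 * the original value, writes a series of garbage values (and their
 * complements) with a readback after each, and finally restores the
 * original value. The readbacks are then compared against reg_write()
 * to spot registers that are unwritable or unexpectedly masked from
 * userspace.
 */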
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		i915_vma_lock(scratch);
		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(scratch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

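/*
 * Store the current value of every whitelisted register into @results
 * using MI_STORE_REGISTER_MEM, so two snapshots can later be compared.
 */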
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

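/*
 * From an unprivileged "user" batch, write 0xffffffff to every writable
 * whitelisted register on @ce->engine.
 */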
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

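/*
 * Compare two register snapshots, @A and @B, slot by slot using @fn,
 * skipping whitelist entries marked as read-only.
 */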
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] = create_scratch(gt->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] = create_scratch(gt->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

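/*
 * Check that the workarounds captured in @lists are still applied:
 * the global GT list plus the per-engine and per-context lists.
 */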
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}

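/* Check that the workarounds are still applied after a full GPU reset. */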
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(gt, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, &lists, "after reset");

out:
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

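/*
 * Check that the workarounds survive per-engine resets, both when the
 * engine is idle and while a spinner keeps it busy.
 */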
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_engine(engine, gt, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);
		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			break;
		}

		ok = verify_wa_lists(gt, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds:idle");

		ok = verify_wa_lists(gt, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds:active");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

err:
		intel_context_put(ce);
		if (ret)
			break;
	}

	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}