// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

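/*
 * Helpers to submit a request and either wait a short while for it to
 * complete (flagging -EIO on timeout), or wait for its spinner payload to
 * start executing on the GPU (flagging -ETIMEDOUT if it never does).
 */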
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

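/*
 * Build and submit a request on @ce that uses SRM to copy the current value
 * of every RING_FORCE_TO_NONPRIV slot into a freshly allocated scratch
 * object, which is returned to the caller for later inspection.
 */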
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

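/*
 * Read back the RING_FORCE_TO_NONPRIV slots through @ce and compare them
 * against the expected whitelist: each slot should hold either the
 * whitelisted register for that index or RING_NOPID for unused entries
 * (see get_whitelist_reg()).
 */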
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int do_guc_reset(struct intel_engine_cs *engine)
{
	/* Currently a no-op as the reset is handled by GuC */
	return 0;
}

static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
{
	struct intel_context *ce;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(*rq)) {
		spin = NULL;
		err = PTR_ERR(*rq);
		goto err;
	}

	err = request_add_spin(*rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

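/*
 * Core of live_reset_whitelist: verify the whitelist before a reset,
 * perform the requested reset underneath an active spinner, then check
 * that the whitelist survives both in the original context and in a
 * freshly created one.
 */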
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin, &rq);
	if (err)
		goto out_spin;

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);
		err = -ETIMEDOUT;
		goto out_spin;
	}

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

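/*
 * Predict the value a register should hold after a write. A rsvd mask of
 * 0x0000ffff denotes a masked register, where the upper 16 bits of the
 * written value select which of the lower 16 bits are updated; otherwise
 * only the bits set in rsvd are writable.
 */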
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

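/*
 * These offsets from the engine's mmio base correspond to the free-running
 * ring timestamp registers (RING_TIMESTAMP, RING_TIMESTAMP_UDW and
 * RING_CTX_TIMESTAMP), which never read back the value last written.
 */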
static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
	switch (reg) {
	case 0x358:
	case 0x35c:
	case 0x3a8:
		return true;

	default:
		return false;
	}
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

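/* Number of whitelist entries that are not marked read-only. */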
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

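/*
 * For every writable register in the whitelist, emit a user batch that
 * saves the original value, hammers the register with a set of test
 * patterns (and their complements) via LRI, stores each readback with SRM,
 * and finally restores the original value. The readbacks are then compared
 * against reg_write()'s prediction to detect registers that silently drop
 * user writes.
 */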
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;
	u32 *cs, *results;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		cs = NULL;
		err = i915_gem_object_lock(scratch->obj, &ww);
		if (!err)
			err = i915_gem_object_lock(batch->obj, &ww);
		if (!err)
			err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto out;

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_ctx;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;
		}

		/* Clear non-priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (GRAPHICS_VER(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);
		cs = NULL;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unmap_scratch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		if (err)
			goto err_request;

		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unmap_scratch;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unmap_scratch:
		i915_gem_object_unpin_map(scratch->obj);
out_unmap_batch:
		if (cs)
			i915_gem_object_unpin_map(batch->obj);
out_ctx:
		intel_context_unpin(ce);
out:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				struct intel_selftest_saved_policy saved;
				int err2;

				err = intel_selftest_modify_policy(engine, &saved,
								   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
				if (err)
					goto out;

				err = check_whitelist_across_reset(engine,
								   do_guc_reset,
								   "guc");

				err2 = intel_selftest_restore_policy(engine, &saved);
				if (err == 0)
					err = err2;
			} else {
				err = check_whitelist_across_reset(engine,
								   do_engine_reset,
								   "engine");
			}

			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

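/*
 * Snapshot the current value of every whitelisted register into @results
 * using SRM from within @ce.
 */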
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non-priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

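/*
 * From an unprivileged user batch, write 0xffffffff to every writable
 * register in the whitelist. Used to check that such writes stick for this
 * context but do not leak into others.
 */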
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non-priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	u8 graphics_ver;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, 9 },
		{ GEN8_L3SQCREG4, 9 },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

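/*
 * Compare two register snapshots slot by slot using @fn (equality between
 * untouched contexts, inequality after scrubbing), skipping entries the
 * whitelist marks as read-only.
 */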
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

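/*
 * Check that the GT, engine and context workaround lists captured in
 * @lists are still applied in hardware, using @str to report which phase
 * of the test is being verified.
 */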
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}

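/*
 * Take a reference snapshot of the workaround lists, perform a full GPU
 * reset, and verify that all workarounds are still in place afterwards.
 */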
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

out:
	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	return ok ? 0 : -ESRCH;
}

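/*
 * As above, but for per-engine resets: verify the workarounds after an
 * idle engine reset (skipped when GuC handles resets) and again after
 * resetting an engine that is busy running a spinner.
 */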
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);
		bool ok;
		int ret2;

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (ret)
			break;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto restore;
		}

		if (!using_guc) {
			ok = verify_wa_lists(gt, lists, "before reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}

			ret = intel_engine_reset(engine, "live_workarounds:idle");
			if (ret) {
				pr_err("%s: Reset failed while idle\n", engine->name);
				goto err;
			}

			ok = verify_wa_lists(gt, lists, "after idle reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {
			ret = -ETIMEDOUT;
			goto skip;
		}

		if (!using_guc) {
			ret = intel_engine_reset(engine, "live_workarounds:active");
			if (ret) {
				pr_err("%s: Reset failed on an active spinner\n",
				       engine->name);
				igt_spinner_fini(&spin);
				goto err;
			}
		}

		/* Ensure the reset happens and kills the engine */
		if (ret == 0)
			ret = intel_selftest_wait_for_rq(rq);

skip:
		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");
		if (!ok)
			ret = -ESRCH;

err:
		intel_context_put(ce);

restore:
		ret2 = intel_selftest_restore_policy(engine, &saved);
		if (ret == 0)
			ret = ret2;
		if (ret)
			break;
	}

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}