/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

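/*
 * Submit @rq and wait briefly (HZ / 5) for it to complete; if the wait
 * fails, report -EIO, otherwise pass back the @err supplied by the caller.
 */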
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

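/*
 * Take reference copies of the GT, engine and context workaround lists so
 * that they can be compared against the live state after resets.
 */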
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

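/*
 * Emit MI_STORE_REGISTER_MEM for every RING_FORCE_TO_NONPRIV slot on
 * @engine, dumping the slot contents into a freshly allocated internal
 * object, and return that object for the caller to inspect.
 */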
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

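/*
 * Read back every RING_NONPRIV slot via read_nonprivs() and verify that
 * each one still holds the register we expect from the engine whitelist
 * (unused slots should point at RING_NOPID).
 */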
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

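/*
 * Submit a spinning batch from a temporary context so that the engine is
 * busy with unrelated work when the reset is triggered.
 */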
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

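/*
 * Verify the RING_NONPRIV whitelist before a reset, again in the same
 * context after the reset (triggered while a scratch context is spinning),
 * and finally in a freshly created context.
 */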
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

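/*
 * Compute the value we expect to read back after writing @new over @old,
 * given the writable bits in @rsvd. The special @rsvd value 0x0000ffff
 * marks a masked register, where the upper 16 bits of the write select
 * which of the lower 16 bits are updated.
 */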
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

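/*
 * For each whitelisted register, write a series of test patterns from an
 * unprivileged batch (LRI), read the register back after every write (SRM),
 * and compare the results against the values predicted by reg_write().
 * Write-only registers are skipped, read-only registers are expected not to
 * change, and the original value is restored (LRM) before the batch ends.
 */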
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		i915_vma_lock(scratch);
		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(scratch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

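/*
 * Store the current value of every whitelisted register into @results,
 * using SRM commands issued from a request on @ctx.
 */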
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

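/*
 * From an unprivileged user batch, write 0xffffffff to every writable
 * whitelisted register, so we can later check which writes stuck and
 * whether they leaked into another context.
 */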
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	vm = i915_gem_context_get_vm_rcu(ctx);
	batch = create_batch(vm);
	i915_vm_put(vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

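/*
 * Compare the two register snapshots @A and @B slot by slot, skipping
 * entries flagged as read-only, and report via @fn() whether each pair
 * satisfies the expected relation (equal across contexts, or changed after
 * scrubbing).
 */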
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(gt->i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, gt, id) {
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;
		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

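/*
 * Check the reference GT, engine and context workaround lists against the
 * values the hardware currently holds, tagging any error messages with
 * @str; returns true only if every list verifies cleanly.
 */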
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

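/*
 * Verify that per-engine resets, both while idle and while a spinner keeps
 * the engine busy, do not clobber the GT, engine or context workarounds.
 */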
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kernel_context_close(ctx);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}