// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

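/*
 * Submit @rq and wait for it to complete, using a 200ms (HZ / 5) timeout
 * as a backstop. Any error passed in via @err is preserved; a timeout is
 * reported as -EIO.
 */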
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

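/*
 * Submit a spinner request and, if @spin is provided, wait until the
 * spinner is confirmed to be executing on the GPU; -ETIMEDOUT if not.
 */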
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

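/*
 * Capture reference copies of the GT, engine and context workaround
 * lists so that verify_wa_lists() can later compare them against what
 * the hardware actually holds.
 */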
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

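/*
 * Emit SRM (MI_STORE_REGISTER_MEM) commands on @ce to dump the contents
 * of every RING_FORCE_TO_NONPRIV slot into a freshly allocated object,
 * which is returned for the caller to inspect.
 */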
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

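/*
 * Read back all RING_FORCE_TO_NONPRIV slots and check that each one
 * still matches the register expected from the engine's whitelist
 * (unused slots are expected to point at RING_NOPID).
 */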
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

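/*
 * Start a spinner on a throwaway context so that the reset issued by
 * the caller happens while a context other than the one being checked
 * is active on the engine.
 */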
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

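/*
 * Verify the RING_NONPRIV whitelist survives a reset: check it in an
 * existing context before the reset, in the same context afterwards,
 * and finally in a freshly created context.
 */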
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

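/*
 * Model the value expected after writing @new to a register with
 * reserved bits. The assumed semantics: rsvd == 0x0000ffff denotes a
 * masked-write register (the upper 16 bits of @new select which of the
 * lower 16 bits are updated); otherwise @rsvd is treated as the mask of
 * bits that accept the write.
 */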
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
	switch (reg) {
	case 0x358:
	case 0x35c:
	case 0x3a8:
		return true;

	default:
		return false;
	}
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

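/* Number of whitelist entries that grant write access (i.e. not RD-only) */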
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

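/*
 * From an unprivileged batch, hammer each writable whitelisted register
 * with a set of bit patterns and their complements, storing the register
 * back to scratch after every write, then compare the readbacks against
 * the values predicted by reg_write().
 */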
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;
	u32 *cs, *results;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read(ce->vm, sz);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		i915_vma_lock(scratch);
		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(scratch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

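/*
 * SRM every whitelisted register into @results so the values visible to
 * @ce can be compared across contexts and before/after scrubbing.
 */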
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

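/*
 * From an unprivileged "user" batch, write 0xffffffff to every writable
 * whitelisted register on @ce, dirtying them so context isolation can be
 * checked afterwards.
 */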
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and leave our writes unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

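/*
 * Compare the two register dumps A and B entry by entry, skipping slots
 * marked read-only, using @fn to decide whether each pair is acceptable
 * (result_eq for "must match", result_neq for "must differ").
 */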
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

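/*
 * Check the reference workaround lists captured by reference_lists_init()
 * against the values the hardware currently holds, for the GT as well as
 * for each engine's own and context workarounds.
 */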
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(gt, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, &lists, "after reset");

out:
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_engine(engine, gt, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);
		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			break;
		}

		ok = verify_wa_lists(gt, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = intel_engine_reset(engine, "live_workarounds:idle");
		if (ret) {
			pr_err("%s: Reset failed while idle\n", engine->name);
			goto err;
		}

		ok = verify_wa_lists(gt, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = intel_engine_reset(engine, "live_workarounds:active");
		if (ret) {
			pr_err("%s: Reset failed on an active spinner\n",
			       engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

err:
		intel_context_put(ce);
		if (ret)
			break;
	}

	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}