1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #include "gem/i915_gem_pm.h"
8 #include "gt/intel_engine_user.h"
9 #include "gt/intel_gt.h"
10 #include "i915_selftest.h"
11 #include "intel_reset.h"
12 
13 #include "selftests/igt_flush_test.h"
14 #include "selftests/igt_reset.h"
15 #include "selftests/igt_spinner.h"
16 #include "selftests/mock_drm.h"
17 
18 #include "gem/selftests/igt_gem_utils.h"
19 #include "gem/selftests/mock_context.h"
20 
/*
 * Registers that are write-only on particular platforms: reading them
 * back does not return what was written, so the dirty-whitelist test
 * must skip them (see wo_register()).
 */
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};
27 
/*
 * Reference copies of the GT, per-engine and per-context workaround
 * lists, rebuilt from scratch by reference_lists_init() so they can be
 * verified against the hardware before/after a reset.
 */
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;      /* engine (mmio) workarounds */
		struct i915_wa_list ctx_wa_list;  /* context (LRI) workarounds */
	} engine[I915_NUM_ENGINES];
};
35 
36 static int request_add_sync(struct i915_request *rq, int err)
37 {
38 	i915_request_get(rq);
39 	i915_request_add(rq);
40 	if (i915_request_wait(rq, 0, HZ / 5) < 0)
41 		err = -EIO;
42 	i915_request_put(rq);
43 
44 	return err;
45 }
46 
47 static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
48 {
49 	int err = 0;
50 
51 	i915_request_get(rq);
52 	i915_request_add(rq);
53 	if (spin && !igt_wait_for_spinner(spin, rq))
54 		err = -ETIMEDOUT;
55 	i915_request_put(rq);
56 
57 	return err;
58 }
59 
/*
 * Build pristine reference copies of every workaround list (global GT,
 * per-engine mmio and per-engine context) so that later verification
 * compares the hardware against what the driver *intended* to apply,
 * not against whatever the live lists currently hold.
 */
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		/* The ctx wa list manages its own init/finish internally */
		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}
84 
/*
 * Release the reference lists built by reference_lists_init().
 * NOTE(review): the per-engine ctx_wa_list is not freed here —
 * presumably intel_wa_list_free() is only needed for the lists this
 * file allocated entries into; confirm against __intel_engine_init_ctx_wa.
 */
static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}
96 
/*
 * read_nonprivs - dump the RING_FORCE_TO_NONPRIV slots into a buffer
 *
 * Emit a request on @ce that stores every RING_FORCE_TO_NONPRIV
 * register of the engine into consecutive u32 slots of a freshly
 * allocated internal object (pre-filled with the 0xc5 poison pattern so
 * unwritten slots are detectable). Returns the object (caller owns the
 * reference) or an ERR_PTR.
 */
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	/* Poison the buffer so missing SRM writes stand out */
	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	/* Track the write so the buffer is coherent when we read it back */
	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(engine->i915) >= 8)
		srm++; /* gen8+ SRM takes a 64b address, one extra dword */

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	/* One SRM per slot: result[i] = RING_FORCE_TO_NONPRIV(base, i) */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq); /* flush the partially-built request */
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}
179 
180 static u32
181 get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
182 {
183 	i915_reg_t reg = i < engine->whitelist.count ?
184 			 engine->whitelist.list[i].reg :
185 			 RING_NOPID(engine->mmio_base);
186 
187 	return i915_mmio_reg_offset(reg);
188 }
189 
190 static void
191 print_results(const struct intel_engine_cs *engine, const u32 *results)
192 {
193 	unsigned int i;
194 
195 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
196 		u32 expected = get_whitelist_reg(engine, i);
197 		u32 actual = results[i];
198 
199 		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
200 			i, expected, actual);
201 	}
202 }
203 
/*
 * check_whitelist - verify the RING_NONPRIV slots match the whitelist
 *
 * Read back every RING_FORCE_TO_NONPRIV slot via read_nonprivs() and
 * compare against the expected whitelist contents. Returns 0 on match,
 * -EINVAL on the first mismatch, -EIO if the GT wedged while waiting.
 */
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	/* Wedge the GT if waiting for the results hangs */
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
	return err;
}
253 
/* Full-device reset restricted to @engine's mask; always reports 0. */
static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}
259 
/* Per-engine reset; propagates the reset result to the caller. */
static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}
264 
/*
 * Put the engine to work in a throwaway context so that a subsequent
 * reset happens while some other context is active. On success the
 * spinner is left running; the caller is responsible for ending it.
 */
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		/* Never started the spinner, so don't try to end it below */
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}
293 
/*
 * check_whitelist_across_reset - whitelist must survive a reset
 *
 * Verify the RING_NONPRIV setup in three stages: before any reset,
 * in the same context after @reset (performed while a scratch context
 * spins on the engine), and finally in a freshly created context.
 * @name is only used for log messages. Returns 0 on success.
 */
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	/* Reset while a different (scratch) context is active */
	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	/* Swap to a brand-new context and check the defaults again */
	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}
362 
363 static struct i915_vma *create_batch(struct i915_address_space *vm)
364 {
365 	struct drm_i915_gem_object *obj;
366 	struct i915_vma *vma;
367 	int err;
368 
369 	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
370 	if (IS_ERR(obj))
371 		return ERR_CAST(obj);
372 
373 	vma = i915_vma_instance(obj, vm, NULL);
374 	if (IS_ERR(vma)) {
375 		err = PTR_ERR(vma);
376 		goto err_obj;
377 	}
378 
379 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
380 	if (err)
381 		goto err_obj;
382 
383 	return vma;
384 
385 err_obj:
386 	i915_gem_object_put(obj);
387 	return ERR_PTR(err);
388 }
389 
390 static u32 reg_write(u32 old, u32 new, u32 rsvd)
391 {
392 	if (rsvd == 0x0000ffff) {
393 		old &= ~(new >> 16);
394 		old |= new & (new >> 16);
395 	} else {
396 		old &= ~rsvd;
397 		old |= new & rsvd;
398 	}
399 
400 	return old;
401 }
402 
403 static bool wo_register(struct intel_engine_cs *engine, u32 reg)
404 {
405 	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
406 	int i;
407 
408 	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
409 	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
410 		return true;
411 
412 	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
413 		if (wo_registers[i].platform == platform &&
414 		    wo_registers[i].reg == reg)
415 			return true;
416 	}
417 
418 	return false;
419 }
420 
421 static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
422 {
423 	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
424 	switch (reg) {
425 	case 0x358:
426 	case 0x35c:
427 	case 0x3a8:
428 		return true;
429 
430 	default:
431 		return false;
432 	}
433 }
434 
435 static bool ro_register(u32 reg)
436 {
437 	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
438 	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
439 		return true;
440 
441 	return false;
442 }
443 
444 static int whitelist_writable_count(struct intel_engine_cs *engine)
445 {
446 	int count = engine->whitelist.count;
447 	int i;
448 
449 	for (i = 0; i < engine->whitelist.count; i++) {
450 		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
451 
452 		if (ro_register(reg))
453 			count--;
454 	}
455 
456 	return count;
457 }
458 
/*
 * check_dirty_whitelist - write garbage to every whitelisted register
 *
 * For each whitelisted register of the context's engine (skipping
 * write-only and timestamp registers), build an unprivileged batch
 * that: saves the original value (SRM), writes every test pattern and
 * its complement (LRI), saves each readback (SRM), and finally
 * restores the original value (LRM). The readbacks are then compared
 * against a software model (reg_write()) that accounts for masked and
 * read-only bits, using the final 0xffffffff write to detect which
 * bits are actually writable.
 *
 * Returns 0 on success, a negative error code on failure; wedges the
 * GT if the batch itself hangs.
 */
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;
	u32 *cs, *results;

	/* One slot for the original value + one per write (both loops) */
	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		/* ww retry loop: locks may need to be dropped and retaken */
		i915_gem_ww_ctx_init(&ww, false);
retry:
		cs = NULL;
		err = i915_gem_object_lock(scratch->obj, &ww);
		if (!err)
			err = i915_gem_object_lock(batch->obj, &ww);
		if (!err)
			err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto out;

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_ctx;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;
		}

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++; /* gen8+ uses 64b addresses: +1 dword */

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);
		cs = NULL; /* signal "already unmapped" for the cleanup path */

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unmap_scratch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		if (err)
			goto err_request;

		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timedout; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;
		}

		/* The last pattern must be all-ones to probe writable bits */
		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unmap_scratch;
			}
		} else {
			rsvd = 0;
		}

		/* Replay the writes in software and compare each readback */
		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++; /* err doubles as a mismatch counter */
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			/* Dump the full write/read/expect trace for debugging */
			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL; /* convert mismatch count to error */
		}
out_unmap_scratch:
		i915_gem_object_unpin_map(scratch->obj);
out_unmap_batch:
		if (cs)
			i915_gem_object_unpin_map(batch->obj);
out_ctx:
		intel_context_unpin(ce);
out:
		/* On ww contention, back off and retry this register */
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}
744 
745 static int live_dirty_whitelist(void *arg)
746 {
747 	struct intel_gt *gt = arg;
748 	struct intel_engine_cs *engine;
749 	enum intel_engine_id id;
750 
751 	/* Can the user write to the whitelisted registers? */
752 
753 	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
754 		return 0;
755 
756 	for_each_engine(engine, gt, id) {
757 		struct intel_context *ce;
758 		int err;
759 
760 		if (engine->whitelist.count == 0)
761 			continue;
762 
763 		ce = intel_context_create(engine);
764 		if (IS_ERR(ce))
765 			return PTR_ERR(ce);
766 
767 		err = check_dirty_whitelist(ce);
768 		intel_context_put(ce);
769 		if (err)
770 			return err;
771 	}
772 
773 	return 0;
774 }
775 
/*
 * Subtest: the RING_NONPRIV whitelist must survive both engine and
 * full-device resets, on every engine that has one.
 */
static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}
811 
/*
 * read_whitelisted_registers - SRM every whitelisted register to @results
 *
 * Emit a request on @ce storing each whitelisted register into
 * consecutive u32 slots of @results, then wait for it synchronously.
 * Returns 0 on success or a negative error code.
 */
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(engine->i915) >= 8)
		srm++; /* gen8+ SRM takes a 64b address, one extra dword */

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	/* Always submit; request_add_sync() propagates any earlier err */
	return request_add_sync(rq, err);
}
859 
/*
 * scrub_whitelisted_registers - overwrite writable whitelisted registers
 *
 * From an unprivileged user batch on @ce, LRI 0xffffffff into every
 * whitelisted register that is not read-only, then wait for completion.
 * Used to check that such writes stay confined to the issuing context.
 * Returns 0 on success or a negative error code.
 */
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	/* One LRI header covering all writable registers */
	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}
928 
/* A register paired with the mask of gens on which an exemption applies. */
struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};
933 
934 static bool find_reg(struct drm_i915_private *i915,
935 		     i915_reg_t reg,
936 		     const struct regmask *tbl,
937 		     unsigned long count)
938 {
939 	u32 offset = i915_mmio_reg_offset(reg);
940 
941 	while (count--) {
942 		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
943 		    i915_mmio_reg_offset(tbl->reg) == offset)
944 			return true;
945 		tbl++;
946 	}
947 
948 	return false;
949 }
950 
/*
 * Registers excused from the "context saved across reset" check on the
 * gens listed — known mistakes grandfathered into the whitelist.
 */
static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}
961 
962 static bool result_eq(struct intel_engine_cs *engine,
963 		      u32 a, u32 b, i915_reg_t reg)
964 {
965 	if (a != b && !pardon_reg(engine->i915, reg)) {
966 		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
967 		       i915_mmio_reg_offset(reg), a, b);
968 		return false;
969 	}
970 
971 	return true;
972 }
973 
/*
 * Registers excused from the "write took effect" check on the gens
 * listed — our writes to these are not readable.
 */
static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}
983 
984 static bool result_neq(struct intel_engine_cs *engine,
985 		       u32 a, u32 b, i915_reg_t reg)
986 {
987 	if (a == b && !writeonly_reg(engine->i915, reg)) {
988 		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
989 		       i915_mmio_reg_offset(reg), a);
990 		return false;
991 	}
992 
993 	return true;
994 }
995 
/*
 * check_whitelisted_registers - compare two whitelist readback buffers
 *
 * Map both result buffers and apply @fn (result_eq or result_neq) to
 * each non read-only whitelisted register. Returns 0 if every pair
 * satisfies @fn, -EINVAL otherwise.
 */
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		/* Read-only slots were never scrubbed; nothing to compare */
		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL; /* keep going to log all failures */
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}
1034 
/*
 * Subtest: writes to whitelisted registers from one context must be
 * visible to that context but invisible to a second context.
 */
static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2]; /* [0] before, [1] after scrub */
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		/* Isolation requires per-context address spaces */
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	/* unpin_and_release tolerates the NULL vmas from a partial init */
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}
1142 
/*
 * Verify the hardware still honours every reference workaround list:
 * the global GT list plus, for each engine, its mmio and context lists.
 * @str tags the log output (e.g. "before reset"). Returns true only if
 * every list verifies.
 */
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}
1173 
/*
 * Subtest: workaround lists must still verify after a full GPU reset.
 * Returns 0 on success, -ESRCH if verification fails either side of
 * the reset.
 */
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(gt, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, &lists, "after reset");

out:
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}
1207 
1208 static int
1209 live_engine_reset_workarounds(void *arg)
1210 {
1211 	struct intel_gt *gt = arg;
1212 	struct intel_engine_cs *engine;
1213 	enum intel_engine_id id;
1214 	struct intel_context *ce;
1215 	struct igt_spinner spin;
1216 	struct i915_request *rq;
1217 	intel_wakeref_t wakeref;
1218 	struct wa_lists lists;
1219 	int ret = 0;
1220 
1221 	if (!intel_has_reset_engine(gt))
1222 		return 0;
1223 
1224 	igt_global_reset_lock(gt);
1225 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1226 
1227 	reference_lists_init(gt, &lists);
1228 
1229 	for_each_engine(engine, gt, id) {
1230 		bool ok;
1231 
1232 		pr_info("Verifying after %s reset...\n", engine->name);
1233 		ce = intel_context_create(engine);
1234 		if (IS_ERR(ce)) {
1235 			ret = PTR_ERR(ce);
1236 			break;
1237 		}
1238 
1239 		ok = verify_wa_lists(gt, &lists, "before reset");
1240 		if (!ok) {
1241 			ret = -ESRCH;
1242 			goto err;
1243 		}
1244 
1245 		intel_engine_reset(engine, "live_workarounds:idle");
1246 
1247 		ok = verify_wa_lists(gt, &lists, "after idle reset");
1248 		if (!ok) {
1249 			ret = -ESRCH;
1250 			goto err;
1251 		}
1252 
1253 		ret = igt_spinner_init(&spin, engine->gt);
1254 		if (ret)
1255 			goto err;
1256 
1257 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
1258 		if (IS_ERR(rq)) {
1259 			ret = PTR_ERR(rq);
1260 			igt_spinner_fini(&spin);
1261 			goto err;
1262 		}
1263 
1264 		ret = request_add_spin(rq, &spin);
1265 		if (ret) {
1266 			pr_err("Spinner failed to start\n");
1267 			igt_spinner_fini(&spin);
1268 			goto err;
1269 		}
1270 
1271 		intel_engine_reset(engine, "live_workarounds:active");
1272 
1273 		igt_spinner_end(&spin);
1274 		igt_spinner_fini(&spin);
1275 
1276 		ok = verify_wa_lists(gt, &lists, "after busy reset");
1277 		if (!ok) {
1278 			ret = -ESRCH;
1279 			goto err;
1280 		}
1281 
1282 err:
1283 		intel_context_put(ce);
1284 		if (ret)
1285 			break;
1286 	}
1287 
1288 	reference_lists_fini(gt, &lists);
1289 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1290 	igt_global_reset_unlock(gt);
1291 
1292 	igt_flush_test(gt->i915);
1293 
1294 	return ret;
1295 }
1296 
/*
 * Entry point: run all live workaround selftests against the device's
 * GT, unless it is already wedged.
 */
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
1312