/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

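/*
 * Build an independent reference copy of the GT, engine and context
 * workaround lists so the live state can be verified against them after
 * the resets exercised below.
 */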
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

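/*
 * Submit a request that stores every RING_FORCE_TO_NONPRIV slot of the
 * engine into a freshly allocated scratch object, returning that object
 * so the caller can compare the slots against the expected whitelist.
 */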
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

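/*
 * Read back the RING_NONPRIV slots via read_nonprivs() and compare each
 * one against the register the engine whitelist expects in that slot
 * (unused slots must contain RING_NOPID).
 */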
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(&ctx->i915->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

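/*
 * Submit a spinner from a throwaway kernel context so that the engine is
 * busy in a different context when the reset is triggered.
 */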
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	GEM_BUG_ON(IS_ERR(ce));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
		rq = igt_spinner_create_request(spin, ce, MI_NOOP);

	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	kernel_context_close(ctx);
	return err;
}

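/*
 * Verify that the whitelist survives a reset: check it before the reset,
 * again in the same context afterwards, and finally in a freshly created
 * context to catch any loss of the default programming.
 */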
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	i915_vm_put(vm);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

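/*
 * Predict the value a register will hold after writing 'new' over 'old',
 * where rsvd is the value read back after writing ~0: a result of
 * 0x0000ffff indicates a masked register (the high 16 bits select which
 * low bits are updated), otherwise only the bits set in rsvd stick.
 */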
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

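/*
 * For each writable register in the engine whitelist, write a series of
 * bit patterns from an unprivileged batch, read each result back into a
 * scratch buffer and check that the values stick as reg_write() predicts,
 * restoring the original register value afterwards.
 */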
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_address_space *vm;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	vm = i915_gem_context_get_vm_rcu(ctx);
	scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
	i915_vm_put(vm);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(&ctx->i915->gt);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	file = mock_file(i915);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mock_file_free(i915, file);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(&i915->gt);

	if (intel_has_reset_engine(&i915->gt)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(&i915->gt)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(&i915->gt);
	return err;
}

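/*
 * From the given context, SRM every whitelisted register (with the access
 * field masked off) into the results vma, one u32 per whitelist entry.
 */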
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear access permission field */
		reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

	return err;
}

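/*
 * Overwrite every writable whitelisted register with ~0 using LRI from an
 * unprivileged "user" batch, so we can later check which writes landed and
 * whether they leaked into another context.
 */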
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, leaving our writes unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

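/*
 * Compare two snapshots of the whitelisted registers (skipping read-only
 * entries), reporting via fn() whether each pair matches expectations.
 */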
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, i915, id) {
		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;
		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

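/*
 * Check the live GT, engine and context workaround state for every engine
 * in the context against the reference lists captured at the start of the
 * test.
 */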
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

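/*
 * Verify that the workarounds are still applied after a full GPU reset:
 * snapshot the reference lists, check before, reset all engines, then
 * check again.
 */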
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(&i915->gt))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);

	return ok ? 0 : -ESRCH;
}

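/*
 * Verify that the workarounds survive per-engine resets, both while the
 * engine is idle and while it is busy running a spinner.
 */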
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(&i915->gt))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);
	kernel_context_close(ctx);

	igt_flush_test(i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}