/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

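/* Registers we cannot verify by reading back the value we wrote */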
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		char name[REF_NAME_MAX];
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

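/*
 * Build reference copies of the GT, engine and context workaround lists
 * so that they can be checked against the HW state after a reset.
 */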
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;
		char *name = lists->engine[id].name;

		snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

		wa_init_start(wal, name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   name);
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		intel_wa_list_free(&lists->engine[id].wa_list);
		intel_wa_list_free(&lists->engine[id].ctx_wa_list);
	}

	intel_wa_list_free(&lists->gt_wa_list);
}

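/*
 * Build a request that uses MI_STORE_REGISTER_MEM to dump every
 * RING_FORCE_TO_NONPRIV slot of the engine into an internal object,
 * which is returned for inspection by the caller.
 */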
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

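/*
 * Compare each RING_FORCE_TO_NONPRIV slot against the register we expect
 * to find there: the whitelist entry for used slots, RING_NOPID otherwise.
 */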
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct igt_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (i915_terminally_wedged(ctx->i915))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	i915_reset(engine->i915, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return i915_reset_engine(engine, "live_workarounds");
}

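/*
 * Keep the engine busy with a spinner on a throwaway kernel context, so
 * that the reset is performed while a different context is active.
 */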
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);

	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

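/*
 * Verify the whitelist before the reset, perform the reset while a scratch
 * context is running, then verify the whitelist again both in the original
 * context and in a freshly created one.
 */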
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, name);

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(ctx);
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out;
	}

	kernel_context_close(ctx);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out;
	}

out:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

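/*
 * Compute the value we expect to read back after writing 'new' over 'old'.
 * A detected mask of 0x0000ffff denotes a masked register, where the upper
 * 16 bits of the write select which of the lower 16 bits are updated;
 * otherwise rsvd is a plain mask of the writable bits.
 */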
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if (reg & RING_FORCE_TO_NONPRIV_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

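/*
 * From an unprivileged batch, write a series of garbage values to each
 * writable whitelisted register (restoring the original value afterwards)
 * and check that every value read back matches what the detected write
 * mask permits.
 */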
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;

		if (wo_register(engine, reg))
			continue;

		if (ro_register(reg))
			continue;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		i915_gem_chipset_flush(ctx->i915);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			i915_gem_set_wedged(ctx->i915);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
		if (!rsvd) {
			pr_err("%s: Unable to write to whitelisted register %x\n",
			       engine->name, reg);
			err = -EINVAL;
			goto out_unpin;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, ~values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
				engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(i915);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(i915);
	return err;
}

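/*
 * Store the current value of every whitelisted register of the engine
 * into the results vma, one u32 per whitelist slot.
 */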
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear RD only and WR only flags */
		reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

	return err;
}

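/*
 * From an unprivileged batch, write 0xffffffff to every writable
 * whitelisted register on the engine.
 */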
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	i915_gem_chipset_flush(ctx->i915);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not behave as expected: our writes cannot be read back */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

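/*
 * Compare two register dumps slot by slot using the supplied predicate
 * (equality for checking isolation, inequality for checking writability).
 */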
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		client[i].scratch[0] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
	}

	for_each_engine(engine, i915, id) {
		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* The user should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	return err;
}

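/*
 * Check that the GT, engine and context workaround lists recorded at the
 * start of the test are still applied to the HW.
 */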
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}
	i915_gem_context_unlock_engines(ctx);

	return ok;
}

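/*
 * Check that the workarounds are still applied after a full GPU reset.
 */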
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	i915_reset(i915, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(i915);

	return ok ? 0 : -ESRCH;
}

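/*
 * Check that the workarounds are still applied after per-engine resets,
 * both while the engine is idle and while it is busy with a spinner.
 */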
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	enum intel_engine_id id;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	for_each_engine(engine, i915, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, i915);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}

err:
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(i915);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};
	int err;

	if (i915_terminally_wedged(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}