// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_internal.h"

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
#include "shmem_utils.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */

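/* A single scratch page in the GGTT, used to read back register values */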
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
}

static bool is_active(struct i915_request *rq)
{
	if (i915_request_is_active(rq))
		return true;

	if (i915_request_on_hold(rq))
		return true;

	if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
		return true;

	return false;
}

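/*
 * Kick the submission tasklet and poll until the request is either active on
 * the HW or already completed; returns -ETIME if the submission has not been
 * acknowledged within the timeout.
 */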
static int wait_for_submit(struct intel_engine_cs *engine,
			   struct i915_request *rq,
			   unsigned long timeout)
{
	/* Ignore our own attempts to suppress excess tasklets */
	tasklet_hi_schedule(&engine->sched_engine->tasklet);

	timeout += jiffies;
	do {
		bool done = time_after(jiffies, timeout);

		if (i915_request_completed(rq)) /* that was quick! */
			return 0;

		/* Wait until the HW has acknowledged the submission (or err) */
		intel_engine_flush_submission(engine);
		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
			return 0;

		if (done)
			return -ETIME;

		cond_resched();
	} while (1);
}

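/*
 * Submit a barrier-priority request on ce that writes 1 into the given
 * status-page slot, releasing anyone spinning on that semaphore.
 */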
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = offset;
	*cs++ = 0;
	*cs++ = 1;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);
	return 0;
}

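/*
 * Submit a kernel-context request ordered after the last request on ce and
 * wait for it, so that ce is switched out and its context image written back.
 */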
static int context_flush(struct intel_context *ce, long timeout)
{
	struct i915_request *rq;
	struct dma_fence *fence;
	int err = 0;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	fence = i915_active_fence_get(&ce->timeline->last_request);
	if (fence) {
		i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	rq = i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, timeout) < 0)
		err = -ETIME;
	i915_request_put(rq);

	rmb(); /* We know the request is written, make sure all state is too! */
	return err;
}

static int live_lrc_layout(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 *lrc;
	int err;

	/*
	 * Check that the register offsets we use to create the initial reg
	 * state match the layout saved by the HW.
	 */

	lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
	if (!lrc)
		return -ENOMEM;
	GEM_BUG_ON(offset_in_page(lrc));

	err = 0;
	for_each_engine(engine, gt, id) {
		u32 *hw;
		int dw;

		if (!engine->default_state)
			continue;

		hw = shmem_pin_map(engine->default_state);
		if (IS_ERR(hw)) {
			err = PTR_ERR(hw);
			break;
		}
		hw += LRC_STATE_OFFSET / sizeof(*hw);

		__lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
				engine->kernel_context, engine, true);

		dw = 0;
		do {
			u32 lri = READ_ONCE(hw[dw]);

			if (lri == 0) {
				dw++;
				continue;
			}

			if (lrc[dw] == 0) {
				pr_debug("%s: skipped instruction %x at dword %d\n",
					 engine->name, lri, dw);
				dw++;
				continue;
			}

			if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
				pr_err("%s: Expected LRI command at dword %d, found %08x\n",
				       engine->name, dw, lri);
				err = -EINVAL;
				break;
			}

			if (lrc[dw] != lri) {
				pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
				       engine->name, dw, lri, lrc[dw]);
				err = -EINVAL;
				break;
			}

			lri &= 0x7f;
			lri++;
			dw++;

			while (lri) {
				u32 offset = READ_ONCE(hw[dw]);

				if (offset != lrc[dw]) {
					pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
					       engine->name, dw, offset, lrc[dw]);
					err = -EINVAL;
					break;
				}

				/*
				 * Skip over the actual register value as we
				 * expect that to differ.
				 */
				dw += 2;
				lri -= 2;
			}
		} while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

		if (err) {
			pr_info("%s: HW register image:\n", engine->name);
			igt_hexdump(hw, PAGE_SIZE);

			pr_info("%s: SW register image:\n", engine->name);
			igt_hexdump(lrc, PAGE_SIZE);
		}

		shmem_unpin_map(engine->default_state, hw);
		if (err)
			break;
	}

	free_page((unsigned long)lrc);
	return err;
}

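/* Return the dword index of the first occurrence of @offset in the image, or -1 */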
static int find_offset(const u32 *lri, u32 offset)
{
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		if (lri[i] == offset)
			return i;

	return -1;
}

static int live_lrc_fixed(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check the assumed register offsets match the actual locations in
	 * the context image.
	 */

	for_each_engine(engine, gt, id) {
		const struct {
			u32 reg;
			u32 offset;
			const char *name;
		} tbl[] = {
			{
				i915_mmio_reg_offset(RING_START(engine->mmio_base)),
				CTX_RING_START - 1,
				"RING_START"
			},
			{
				i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
				CTX_RING_CTL - 1,
				"RING_CTL"
			},
			{
				i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
				CTX_RING_HEAD - 1,
				"RING_HEAD"
			},
			{
				i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
				CTX_RING_TAIL - 1,
				"RING_TAIL"
			},
			{
				i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
				lrc_ring_mi_mode(engine),
				"RING_MI_MODE"
			},
			{
				i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
				CTX_BB_STATE - 1,
				"BB_STATE"
			},
			{
				i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
				lrc_ring_wa_bb_per_ctx(engine),
				"RING_BB_PER_CTX_PTR"
			},
			{
				i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
				lrc_ring_indirect_ptr(engine),
				"RING_INDIRECT_CTX_PTR"
			},
			{
				i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
				lrc_ring_indirect_offset(engine),
				"RING_INDIRECT_CTX_OFFSET"
			},
			{
				i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
				CTX_TIMESTAMP - 1,
				"RING_CTX_TIMESTAMP"
			},
			{
				i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
				lrc_ring_gpr0(engine),
				"RING_CS_GPR0"
			},
			{
				i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
				lrc_ring_cmd_buf_cctl(engine),
				"RING_CMD_BUF_CCTL"
			},
			{ },
		}, *t;
		u32 *hw;

		if (!engine->default_state)
			continue;

		hw = shmem_pin_map(engine->default_state);
		if (IS_ERR(hw)) {
			err = PTR_ERR(hw);
			break;
		}
		hw += LRC_STATE_OFFSET / sizeof(*hw);

		for (t = tbl; t->name; t++) {
			int dw = find_offset(hw, t->reg);

			if (dw != t->offset) {
				pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
				       engine->name,
				       t->name,
				       t->reg,
				       dw,
				       t->offset);
				err = -EINVAL;
			}
		}

		shmem_unpin_map(engine->default_state, hw);
	}

	return err;
}

static int __live_lrc_state(struct intel_engine_cs *engine,
			    struct i915_vma *scratch)
{
	struct intel_context *ce;
	struct i915_request *rq;
	struct i915_gem_ww_ctx ww;
	enum {
		RING_START_IDX = 0,
		RING_TAIL_IDX,
		MAX_IDX
	};
	u32 expected[MAX_IDX];
	u32 *cs;
	int err;
	int n;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(scratch->obj, &ww);
	if (!err)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_put;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	cs = intel_ring_begin(rq, 4 * MAX_IDX);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		i915_request_add(rq);
		goto err_unpin;
	}

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
	*cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
	*cs++ = 0;

	expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
	*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
	*cs++ = 0;

	err = i915_request_await_object(rq, scratch->obj, true);
	if (!err)
		err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err)
		goto err_rq;

	intel_engine_flush_submission(engine);
	expected[RING_TAIL_IDX] = ce->ring->tail;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	for (n = 0; n < MAX_IDX; n++) {
		if (cs[n] != expected[n]) {
			pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
			       engine->name, n, cs[n], expected[n]);
			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(scratch->obj);

err_rq:
	i915_request_put(rq);
err_unpin:
	intel_context_unpin(ce);
err_put:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_context_put(ce);
	return err;
}

static int live_lrc_state(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_vma *scratch;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check the live register state matches what we expect for this
	 * intel_context.
	 */

	scratch = create_scratch(gt);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	for_each_engine(engine, gt, id) {
		err = __live_lrc_state(engine, scratch);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

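/* Fill every CS_GPR dword of ce's engine with a non-zero poison value */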
static int gpr_make_dirty(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;
	int n;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = CS_GPR(ce->engine, n);
		*cs++ = STACK_MAGIC;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);

	return 0;
}

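/*
 * Build a request on ce that spins on the status-page semaphore and then
 * stores every CS_GPR dword into the scratch buffer for later inspection.
 */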
static struct i915_request *
__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;
	int err;
	int n;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return ERR_CAST(cs);
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = offset;
	*cs++ = 0;

	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = CS_GPR(ce->engine, n);
		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
		*cs++ = 0;
	}

	i915_vma_lock(scratch);
	err = i915_request_await_object(rq, scratch->obj, true);
	if (!err)
		err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(scratch);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		rq = ERR_PTR(err);
	}

	return rq;
}

static int __live_lrc_gpr(struct intel_engine_cs *engine,
			  struct i915_vma *scratch,
			  bool preempt)
{
	u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
	struct intel_context *ce;
	struct i915_request *rq;
	u32 *cs;
	int err;
	int n;

	if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
		return 0; /* GPR only on rcs0 for gen8 */

	err = gpr_make_dirty(engine->kernel_context);
	if (err)
		return err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = __gpr_read(ce, scratch, slot);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_put;
	}

	err = wait_for_submit(engine, rq, HZ / 2);
	if (err)
		goto err_rq;

	if (preempt) {
		err = gpr_make_dirty(engine->kernel_context);
		if (err)
			goto err_rq;

		err = emit_semaphore_signal(engine->kernel_context, slot);
		if (err)
			goto err_rq;

		err = wait_for_submit(engine, rq, HZ / 2);
		if (err)
			goto err_rq;
	} else {
		slot[0] = 1;
		wmb();
	}

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	for (n = 0; n < NUM_GPR_DW; n++) {
		if (cs[n]) {
			pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
			       engine->name,
			       n / 2, n & 1 ? "udw" : "ldw",
			       cs[n]);
			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(scratch->obj);

err_rq:
	memset32(&slot[0], -1, 4);
	wmb();
	i915_request_put(rq);
err_put:
	intel_context_put(ce);
	return err;
}

static int live_lrc_gpr(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_vma *scratch;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that GPR registers are cleared in new contexts as we need
	 * to avoid leaking any information from previous contexts.
	 */

	scratch = create_scratch(gt);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	for_each_engine(engine, gt, id) {
		st_engine_heartbeat_disable(engine);

		err = __live_lrc_gpr(engine, scratch, false);
		if (err)
			goto err;

		err = __live_lrc_gpr(engine, scratch, true);
		if (err)
			goto err;

err:
		st_engine_heartbeat_enable(engine);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

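/*
 * Build a request that spins on the status-page semaphore and, once released,
 * stores the current CTX_TIMESTAMP register into slot[idx].
 */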
static struct i915_request *
create_timestamp(struct intel_context *ce, void *slot, int idx)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;
	int err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	cs = intel_ring_begin(rq, 10);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = offset;
	*cs++ = 0;

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = offset + idx * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	err = 0;
err:
	i915_request_get(rq);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		return ERR_PTR(err);
	}

	return rq;
}

struct lrc_timestamp {
	struct intel_engine_cs *engine;
	struct intel_context *ce[2];
	u32 poison;
};

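/* Timestamps are u32 and wrap; compare via the signed delta */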
static bool timestamp_advanced(u32 start, u32 end)
{
	return (s32)(end - start) > 0;
}

static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
{
	u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
	struct i915_request *rq;
	u32 timestamp;
	int err = 0;

	arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
	rq = create_timestamp(arg->ce[0], slot, 1);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = wait_for_submit(rq->engine, rq, HZ / 2);
	if (err)
		goto err;

	if (preempt) {
		arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
		err = emit_semaphore_signal(arg->ce[1], slot);
		if (err)
			goto err;
	} else {
		slot[0] = 1;
		wmb();
	}

	/* And wait for switch to kernel (to save our context to memory) */
	err = context_flush(arg->ce[0], HZ / 2);
	if (err)
		goto err;

	if (!timestamp_advanced(arg->poison, slot[1])) {
		pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       arg->poison, slot[1]);
		err = -EINVAL;
	}

	timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
	if (!timestamp_advanced(slot[1], timestamp)) {
		pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       slot[1], timestamp);
		err = -EINVAL;
	}

err:
	memset32(slot, -1, 4);
	i915_request_put(rq);
	return err;
}

static int live_lrc_timestamp(void *arg)
{
	struct lrc_timestamp data = {};
	struct intel_gt *gt = arg;
	enum intel_engine_id id;
	const u32 poison[] = {
		0,
		S32_MAX,
		(u32)S32_MAX + 1,
		U32_MAX,
	};

	/*
	 * We want to verify that the timestamp is saved and restored across
	 * context switches and is monotonic.
	 *
	 * So we do this with a little bit of LRC poisoning to check various
	 * boundary conditions, and see what happens if we preempt the context
	 * with a second request (carrying more poison into the timestamp).
	 */

	for_each_engine(data.engine, gt, id) {
		int i, err = 0;

		st_engine_heartbeat_disable(data.engine);

		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
			struct intel_context *tmp;

			tmp = intel_context_create(data.engine);
			if (IS_ERR(tmp)) {
				err = PTR_ERR(tmp);
				goto err;
			}

			err = intel_context_pin(tmp);
			if (err) {
				intel_context_put(tmp);
				goto err;
			}

			data.ce[i] = tmp;
		}

		for (i = 0; i < ARRAY_SIZE(poison); i++) {
			data.poison = poison[i];

			err = __lrc_timestamp(&data, false);
			if (err)
				break;

			err = __lrc_timestamp(&data, true);
			if (err)
				break;
		}

err:
		st_engine_heartbeat_enable(data.engine);
		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
			if (!data.ce[i])
				break;

			intel_context_unpin(data.ce[i]);
			intel_context_put(data.ce[i]);
		}

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

static struct i915_vma *
create_user_vma(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

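/*
 * Build a user batch that copies every register named by the LRI lists in
 * the default context image into the scratch buffer, one dword per register.
 */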
static struct i915_vma *
store_context(struct intel_context *ce, struct i915_vma *scratch)
{
	struct i915_vma *batch;
	u32 dw, x, *cs, *hw;
	u32 *defaults;

	batch = create_user_vma(ce->vm, SZ_64K);
	if (IS_ERR(batch))
		return batch;

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		i915_vma_put(batch);
		return ERR_CAST(cs);
	}

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults) {
		i915_gem_object_unpin_map(batch->obj);
		i915_vma_put(batch);
		return ERR_PTR(-ENOMEM);
	}

	x = 0;
	dw = 0;
	hw = defaults;
	hw += LRC_STATE_OFFSET / sizeof(*hw);
	do {
		u32 len = hw[dw] & 0x7f;

		if (hw[dw] == 0) {
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
			dw += len + 2;
			continue;
		}

		dw++;
		len = (len + 1) / 2;
		while (len--) {
			*cs++ = MI_STORE_REGISTER_MEM_GEN8;
			*cs++ = hw[dw];
			*cs++ = lower_32_bits(scratch->node.start + x);
			*cs++ = upper_32_bits(scratch->node.start + x);

			dw += 2;
			x += 4;
		}
	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	*cs++ = MI_BATCH_BUFFER_END;

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_flush_map(batch->obj);
	i915_gem_object_unpin_map(batch->obj);

	return batch;
}

static int move_to_active(struct i915_request *rq,
			  struct i915_vma *vma,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, flags);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

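/*
 * Submit a request on ce that records its register state into @before, spins
 * on the semaphore (during which another context may attempt to poison those
 * registers), then records the state again into @after.
 */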
static struct i915_request *
record_registers(struct intel_context *ce,
		 struct i915_vma *before,
		 struct i915_vma *after,
		 u32 *sema)
{
	struct i915_vma *b_before, *b_after;
	struct i915_request *rq;
	u32 *cs;
	int err;

	b_before = store_context(ce, before);
	if (IS_ERR(b_before))
		return ERR_CAST(b_before);

	b_after = store_context(ce, after);
	if (IS_ERR(b_after)) {
		rq = ERR_CAST(b_after);
		goto err_before;
	}

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		goto err_after;

	err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
	if (err)
		goto err_rq;

	err = move_to_active(rq, b_before, 0);
	if (err)
		goto err_rq;

	err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
	if (err)
		goto err_rq;

	err = move_to_active(rq, b_after, 0);
	if (err)
		goto err_rq;

	cs = intel_ring_begin(rq, 14);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(b_before->node.start);
	*cs++ = upper_32_bits(b_before->node.start);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(sema);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(b_after->node.start);
	*cs++ = upper_32_bits(b_after->node.start);

	intel_ring_advance(rq, cs);

	WRITE_ONCE(*sema, 0);
	i915_request_get(rq);
	i915_request_add(rq);
err_after:
	i915_vma_put(b_after);
err_before:
	i915_vma_put(b_before);
	return rq;

err_rq:
	i915_request_add(rq);
	rq = ERR_PTR(err);
	goto err_after;
}

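/*
 * Build a user batch that reloads every register named by the LRI lists in
 * the default context image with the poison value.
 */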
static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
{
	struct i915_vma *batch;
	u32 dw, *cs, *hw;
	u32 *defaults;

	batch = create_user_vma(ce->vm, SZ_64K);
	if (IS_ERR(batch))
		return batch;

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		i915_vma_put(batch);
		return ERR_CAST(cs);
	}

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults) {
		i915_gem_object_unpin_map(batch->obj);
		i915_vma_put(batch);
		return ERR_PTR(-ENOMEM);
	}

	dw = 0;
	hw = defaults;
	hw += LRC_STATE_OFFSET / sizeof(*hw);
	do {
		u32 len = hw[dw] & 0x7f;

		if (hw[dw] == 0) {
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
			dw += len + 2;
			continue;
		}

		dw++;
		len = (len + 1) / 2;
		*cs++ = MI_LOAD_REGISTER_IMM(len);
		while (len--) {
			*cs++ = hw[dw];
			*cs++ = poison;
			dw += 2;
		}
	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	*cs++ = MI_BATCH_BUFFER_END;

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_flush_map(batch->obj);
	i915_gem_object_unpin_map(batch->obj);

	return batch;
}

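/*
 * Run the poisoning batch on ce with arbitration disabled, then write to the
 * semaphore to release the recording context on the other timeline.
 */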
static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cs;
	int err;

	batch = load_context(ce, poison);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = move_to_active(rq, batch, 0);
	if (err)
		goto err_rq;

	cs = intel_ring_begin(rq, 8);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(batch->node.start);
	*cs++ = upper_32_bits(batch->node.start);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(sema);
	*cs++ = 0;
	*cs++ = 1;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
err_rq:
	i915_request_add(rq);
err_batch:
	i915_vma_put(batch);
	return err;
}

static bool is_moving(u32 a, u32 b)
{
	return a != b;
}

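/*
 * Compare the registers recorded before and after the poisoning attempt.
 * Registers that were already changing between the two reference snapshots
 * are ignored, as are RING_HEAD/RING_TAIL; any other difference indicates
 * that state leaked in from the other context.
 */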
static int compare_isolation(struct intel_engine_cs *engine,
			     struct i915_vma *ref[2],
			     struct i915_vma *result[2],
			     struct intel_context *ce,
			     u32 poison)
{
	u32 x, dw, *hw, *lrc;
	u32 *A[2], *B[2];
	u32 *defaults;
	int err = 0;

	A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
	if (IS_ERR(A[0]))
		return PTR_ERR(A[0]);

	A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
	if (IS_ERR(A[1])) {
		err = PTR_ERR(A[1]);
		goto err_A0;
	}

	B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
	if (IS_ERR(B[0])) {
		err = PTR_ERR(B[0]);
		goto err_A1;
	}

	B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
	if (IS_ERR(B[1])) {
		err = PTR_ERR(B[1]);
		goto err_B0;
	}

	lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
					       i915_coherent_map_type(engine->i915,
								      ce->state->obj,
								      false));
	if (IS_ERR(lrc)) {
		err = PTR_ERR(lrc);
		goto err_B1;
	}
	lrc += LRC_STATE_OFFSET / sizeof(*hw);

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults) {
		err = -ENOMEM;
		goto err_lrc;
	}

	x = 0;
	dw = 0;
	hw = defaults;
	hw += LRC_STATE_OFFSET / sizeof(*hw);
	do {
		u32 len = hw[dw] & 0x7f;

		if (hw[dw] == 0) {
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
			dw += len + 2;
			continue;
		}

		dw++;
		len = (len + 1) / 2;
		while (len--) {
			if (!is_moving(A[0][x], A[1][x]) &&
			    (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
				switch (hw[dw] & 4095) {
				case 0x30: /* RING_HEAD */
				case 0x34: /* RING_TAIL */
					break;

				default:
					pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
					       engine->name, dw,
					       hw[dw], hw[dw + 1],
					       A[0][x], B[0][x], B[1][x],
					       poison, lrc[dw + 1]);
					err = -EINVAL;
				}
			}
			dw += 2;
			x++;
		}
	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	shmem_unpin_map(ce->engine->default_state, defaults);
err_lrc:
	i915_gem_object_unpin_map(ce->state->obj);
err_B1:
	i915_gem_object_unpin_map(result[1]->obj);
err_B0:
	i915_gem_object_unpin_map(result[0]->obj);
err_A1:
	i915_gem_object_unpin_map(ref[1]->obj);
err_A0:
	i915_gem_object_unpin_map(ref[0]->obj);
	return err;
}

static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
	struct i915_vma *ref[2], *result[2];
	struct intel_context *A, *B;
	struct i915_request *rq;
	int err;

	A = intel_context_create(engine);
	if (IS_ERR(A))
		return PTR_ERR(A);

	B = intel_context_create(engine);
	if (IS_ERR(B)) {
		err = PTR_ERR(B);
		goto err_A;
	}

	ref[0] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(ref[0])) {
		err = PTR_ERR(ref[0]);
		goto err_B;
	}

	ref[1] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(ref[1])) {
		err = PTR_ERR(ref[1]);
		goto err_ref0;
	}

	rq = record_registers(A, ref[0], ref[1], sema);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_ref1;
	}

	WRITE_ONCE(*sema, 1);
	wmb();

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		err = -ETIME;
		goto err_ref1;
	}
	i915_request_put(rq);

	result[0] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(result[0])) {
		err = PTR_ERR(result[0]);
		goto err_ref1;
	}

	result[1] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(result[1])) {
		err = PTR_ERR(result[1]);
		goto err_result0;
	}

	rq = record_registers(A, result[0], result[1], sema);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_result1;
	}

	err = poison_registers(B, poison, sema);
	if (err) {
		WRITE_ONCE(*sema, -1);
		i915_request_put(rq);
		goto err_result1;
	}

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		err = -ETIME;
		goto err_result1;
	}
	i915_request_put(rq);

	err = compare_isolation(engine, ref, result, A, poison);

err_result1:
	i915_vma_put(result[1]);
err_result0:
	i915_vma_put(result[0]);
err_ref1:
	i915_vma_put(ref[1]);
err_ref0:
	i915_vma_put(ref[0]);
err_B:
	intel_context_put(B);
err_A:
	intel_context_put(A);
	return err;
}

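/*
 * Engine/platform combinations excluded from this test by default; they are
 * only exercised when CONFIG_DRM_I915_SELFTEST_BROKEN is enabled.
 */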
static bool skip_isolation(const struct intel_engine_cs *engine)
{
	if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
		return true;

	if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
		return true;

	return false;
}

static int live_lrc_isolation(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const u32 poison[] = {
		STACK_MAGIC,
		0x3a3a3a3a,
		0x5c5c5c5c,
		0xffffffff,
		0xffff0000,
	};
	int err = 0;

	/*
	 * Our goal is to try to verify that per-context state cannot be
	 * tampered with by another non-privileged client.
	 *
	 * We take the list of context registers from the LRI in the default
	 * context image and attempt to modify that list from a remote context.
	 */

	for_each_engine(engine, gt, id) {
		int i;

		/* Just don't even ask */
		if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
		    skip_isolation(engine))
			continue;

		intel_engine_pm_get(engine);
		for (i = 0; i < ARRAY_SIZE(poison); i++) {
			int result;

			result = __lrc_isolation(engine, poison[i]);
			if (result && !err)
				err = result;

			result = __lrc_isolation(engine, ~poison[i]);
			if (result && !err)
				err = result;
		}
		intel_engine_pm_put(engine);
		if (igt_flush_test(gt->i915)) {
			err = -EIO;
			break;
		}
	}

	return err;
}

static int indirect_ctx_submit_req(struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}

#define CTX_BB_CANARY_OFFSET (3 * 1024)
#define CTX_BB_CANARY_INDEX  (CTX_BB_CANARY_OFFSET / sizeof(u32))

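/*
 * The indirect ctx bb stores the live RING_START value into a canary slot of
 * the per-context wa_bb page; as RING_START differs per context, it proves
 * the batch ran for this particular context.
 */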
static u32 *
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_START(0));
	*cs++ = i915_ggtt_offset(ce->state) +
		context_wa_bb_offset(ce) +
		CTX_BB_CANARY_OFFSET;
	*cs++ = 0;

	return cs;
}

static void
indirect_ctx_bb_setup(struct intel_context *ce)
{
	u32 *cs = context_indirect_bb(ce);

	cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;

	setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}

static bool check_ring_start(struct intel_context *ce)
{
	const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
		LRC_STATE_OFFSET + context_wa_bb_offset(ce);

	if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
		return true;

	pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
	       ctx_bb[CTX_BB_CANARY_INDEX],
	       ce->lrc_reg_state[CTX_RING_START]);

	return false;
}

static int indirect_ctx_bb_check(struct intel_context *ce)
{
	int err;

	err = indirect_ctx_submit_req(ce);
	if (err)
		return err;

	if (!check_ring_start(ce))
		return -EINVAL;

	return 0;
}

static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
{
	struct intel_context *a, *b;
	int err;

	a = intel_context_create(engine);
	if (IS_ERR(a))
		return PTR_ERR(a);
	err = intel_context_pin(a);
	if (err)
		goto put_a;

	b = intel_context_create(engine);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto unpin_a;
	}
	err = intel_context_pin(b);
	if (err)
		goto put_b;

	/* We use the already reserved extra page in context state */
	if (!a->wa_bb_page) {
		GEM_BUG_ON(b->wa_bb_page);
		GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
		goto unpin_b;
	}

	/*
	 * To test that our per-context bb is truly per-context and executes
	 * at the intended point in the context-restore process, make the
	 * batch store the ring start value to memory. As the ring start is
	 * restored before the indirect ctx bb runs, and as it differs for
	 * each context, it suits this purpose.
	 */
	indirect_ctx_bb_setup(a);
	indirect_ctx_bb_setup(b);

	err = indirect_ctx_bb_check(a);
	if (err)
		goto unpin_b;

	err = indirect_ctx_bb_check(b);

unpin_b:
	intel_context_unpin(b);
put_b:
	intel_context_put(b);
unpin_a:
	intel_context_unpin(a);
put_a:
	intel_context_put(a);

	return err;
}

static int live_lrc_indirect_ctx_bb(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = __live_lrc_indirect_ctx_bb(engine);
		intel_engine_pm_put(engine);

		if (igt_flush_test(gt->i915))
			err = -EIO;

		if (err)
			break;
	}

	return err;
}

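/*
 * Trigger a per-engine reset with bottom halves disabled and the submission
 * tasklet paused, skipping the reset if the hanging request already carries
 * a fence error from an earlier reset.
 */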
static void garbage_reset(struct intel_engine_cs *engine,
			  struct i915_request *rq)
{
	const unsigned int bit = I915_RESET_ENGINE + engine->id;
	unsigned long *lock = &engine->gt->reset.flags;

	local_bh_disable();
	if (!test_and_set_bit(bit, lock)) {
		tasklet_disable(&engine->sched_engine->tasklet);

		if (!rq->fence.error)
			__intel_engine_reset_bh(engine, NULL);

		tasklet_enable(&engine->sched_engine->tasklet);
		clear_and_wake_up_bit(bit, lock);
	}
	local_bh_enable();
}

static struct i915_request *garbage(struct intel_context *ce,
				    struct rnd_state *prng)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (err)
		return ERR_PTR(err);

	prandom_bytes_state(prng,
			    ce->lrc_reg_state,
			    ce->engine->context_size -
			    LRC_STATE_OFFSET);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_request_get(rq);
	i915_request_add(rq);
	return rq;

err_unpin:
	intel_context_unpin(ce);
	return ERR_PTR(err);
}

static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct intel_context *ce;
	struct i915_request *hang;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	hang = garbage(ce, prng);
	if (IS_ERR(hang)) {
		err = PTR_ERR(hang);
		goto err_ce;
	}

	if (wait_for_submit(engine, hang, HZ / 2)) {
		i915_request_put(hang);
		err = -ETIME;
		goto err_ce;
	}

	intel_context_set_banned(ce);
	garbage_reset(engine, hang);

	intel_engine_flush_submission(engine);
	if (!hang->fence.error) {
		i915_request_put(hang);
		pr_err("%s: corrupted context was not reset\n",
		       engine->name);
		err = -EINVAL;
		goto err_ce;
	}

	if (i915_request_wait(hang, 0, HZ / 2) < 0) {
		pr_err("%s: corrupted context did not recover\n",
		       engine->name);
		i915_request_put(hang);
		err = -EIO;
		goto err_ce;
	}
	i915_request_put(hang);

err_ce:
	intel_context_put(ce);
	return err;
}

static int live_lrc_garbage(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Verify that we can recover if one context state is completely
	 * corrupted.
	 */

	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	for_each_engine(engine, gt, id) {
		I915_RND_STATE(prng);
		int err = 0, i;

		if (!intel_has_reset_engine(engine->gt))
			continue;

		intel_engine_pm_get(engine);
		for (i = 0; i < 3; i++) {
			err = __lrc_garbage(engine, &prng);
			if (err)
				break;
		}
		intel_engine_pm_put(engine);

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	IGT_TIMEOUT(end_time);
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ce->runtime.num_underflow = 0;
	ce->runtime.max_underflow = 0;

	do {
		unsigned int loop = 1024;

		while (loop) {
			rq = intel_context_create_request(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto err_rq;
			}

			if (--loop == 0)
				i915_request_get(rq);

			i915_request_add(rq);
		}

		if (__igt_timeout(end_time, NULL))
			break;

		i915_request_put(rq);
	} while (1);

	err = i915_request_wait(rq, 0, HZ / 5);
	if (err < 0) {
		pr_err("%s: request not completed!\n", engine->name);
		goto err_wait;
	}

	igt_flush_test(engine->i915);

	pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
		engine->name,
		intel_context_get_total_runtime_ns(ce),
		intel_context_get_avg_runtime_ns(ce));

	err = 0;
	if (ce->runtime.num_underflow) {
		pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
		       engine->name,
		       ce->runtime.num_underflow,
		       ce->runtime.max_underflow);
		GEM_TRACE_DUMP();
		err = -EOVERFLOW;
	}

err_wait:
	i915_request_put(rq);
err_rq:
	intel_context_put(ce);
	return err;
}

static int live_pphwsp_runtime(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that cumulative context runtime as stored in the pphwsp[16]
	 * is monotonic.
	 */

	for_each_engine(engine, gt, id) {
		err = __live_pphwsp_runtime(engine);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

int intel_lrc_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_lrc_layout),
		SUBTEST(live_lrc_fixed),
		SUBTEST(live_lrc_state),
		SUBTEST(live_lrc_gpr),
		SUBTEST(live_lrc_isolation),
		SUBTEST(live_lrc_timestamp),
		SUBTEST(live_lrc_garbage),
		SUBTEST(live_pphwsp_runtime),
		SUBTEST(live_lrc_indirect_ctx_bb),
	};

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}