xref: /openbmc/linux/drivers/gpu/drm/i915/selftests/i915_request.c (revision 4b0aaacee51eb6592a03fdefd5ce97558518e291)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_request_add(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

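/*
 * Check that a high priority request submitted later can be reordered to
 * execute ahead of an earlier, still-pending low priority request. The mock
 * backend lets us cancel and resubmit requests by hand to simulate
 * preemption.
 */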
static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_request_get(request);
	i915_request_add(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_request_add(vip);
		goto err_context_1;
	}
	i915_request_get(vip);
	i915_request_add(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

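/*
 * Entry point for the mock selftests: these run against a mock GEM device
 * and so exercise the request machinery without touching real hardware.
 */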
int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_put(&i915->drm);

	return err;
}

struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

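/*
 * The live_test bookkeeping brackets each live subtest: we idle the GPU and
 * sample the reset count beforehand, then verify afterwards that the engines
 * idled again without any GPU resets or missed interrupts in between.
 */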
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err) {
		pr_err("%s(%s): failed to idle before test, with err=%d!\n",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized runs of empty requests, to each engine
	 * (individually), and wait for the last to complete. This measures
	 * the baseline overhead of allocating and submitting bare requests
	 * to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
		unsigned long n, prime;
		IGT_TIMEOUT(end_time);
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(request);
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

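/*
 * Build a one-page batch buffer in the global GTT containing only
 * MI_BATCH_BUFFER_END, i.e. a batch that does nothing but retire.
 */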
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

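/*
 * Allocate a request on the given engine and point it at the empty batch;
 * any error from emit_bb_start is propagated after the request is added.
 */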
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct i915_request *request;
	int err;

	request = i915_request_alloc(engine, engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	i915_request_add(request);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized runs of requests, each carrying an empty
	 * batch buffer, to each engine (individually), and wait for them
	 * to complete. Compared to live_nop_request, this measures the
	 * additional overhead of executing an (empty) batch.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

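/*
 * Build a batch buffer whose first instruction jumps back to its own start,
 * so it spins on the GPU indefinitely until we rewrite that instruction
 * with MI_BATCH_BUFFER_END (see recursive_batch_resolve below).
 */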
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}
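
	/*
	 * Emit a jump back to the start of this batch: the encoding of
	 * MI_BATCH_BUFFER_START varies by generation (64-bit address on
	 * gen8+, ppgtt select via bit 8 on gen6+, GTT-space flag before
	 * that).
	 */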
	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

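/*
 * Stop a spinning batch by overwriting its self-referencing jump with
 * MI_BATCH_BUFFER_END, allowing the request to complete.
 */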
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			/* Don't hand an ERR_PTR to the cleanup loop below */
			request[id] = NULL;
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_request_get(request[id]);
		i915_request_add(request[id]);
	}

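	/*
	 * All the batches are still spinning at this point, so none of the
	 * requests should have completed.
	 */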
	for_each_engine(engine, i915, id) {
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
		i915_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			/* Don't hand an ERR_PTR to the cleanup loop below */
			request[id] = NULL;
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

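	/*
	 * Resolve and wait on the requests in submission order; each batch
	 * spins until resolved, and each request also waits on its
	 * predecessor's fence, so they must complete strictly in order.
	 */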
	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

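		/*
		 * Make sure the batch terminates (it may still be spinning
		 * if we bailed out early) so the request can retire before
		 * we release it.
		 */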
		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

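/*
 * Entry point for the live selftests: these run on real hardware, and are
 * skipped entirely if the GPU is already terminally wedged.
 */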
int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}