/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

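/* Selftests for the i915 request machinery. The igt_* tests below run
 * against a mock GEM device (no hardware required), while the live_* tests
 * further down submit real requests to each physical engine.
 */
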
static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_request_add(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */
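	/* A wait with a zero timeout acts as a busy query: while the request
	 * is pending it must report -ETIME, both before and after submission.
	 */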

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */
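	/* i915's fence ops forward waits to i915_request_wait(), so a timed
	 * out wait reports -ETIME here, while success returns the remaining
	 * jiffies (hence the <= 0 checks below).
	 */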

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

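	/* Check that we can reorder requests already in the queue: a later,
	 * high priority "vip" request must complete while the earlier request
	 * it jumped ahead of is still pending.
	 */
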
	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_request_get(request);
	i915_request_add(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_request_add(vip);
		goto err_context_1;
	}
	i915_request_get(vip);
	i915_request_add(vip);
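	/* Resubmit the cancelled request, now queued behind the vip */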
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

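	/* The mock device provides software-only engines; no hardware is
	 * touched by these subtests.
	 */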
	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_put(&i915->drm);

	return err;
}

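/* live_test brackets each live subtest: begin_live_test() waits for the GPU
 * to idle and samples the reset count, while end_live_test() verifies that
 * the test left the GPU idle and triggered no resets or missed interrupts.
 */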
struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
		unsigned long n, prime;
		IGT_TIMEOUT(end_time);
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

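		/* Scale the number of requests per sample through successive
		 * primes, timing each batch, so the overhead of a single
		 * request can be compared against a large batch.
		 */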
		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(request);
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

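/* Build a single-page batch containing just MI_BATCH_BUFFER_END, pinned
 * into the global GTT so that it can be executed from any engine.
 */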
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static struct i915_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct i915_request *request;
	int err;

	request = i915_request_alloc(engine, engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	i915_request_add(request);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

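		/* As in live_nop_request, scale the batch size through
		 * successive primes and compare the per-request latencies.
		 */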
		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

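/* Build a batch whose first instruction jumps back to its own start, so it
 * spins on the GPU until recursive_batch_resolve() overwrites that first
 * dword with MI_BATCH_BUFFER_END and allows the batch to complete.
 */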
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			request[id] = NULL; /* keep the cleanup loop below safe */
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_request_get(request[id]);
		i915_request_add(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

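	/* All engines share the one spinning batch, so terminating it now
	 * releases every request at once.
	 */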
	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
		i915_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

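		/* Make sure a still-spinning batch is terminated before we
		 * drop our references to it.
		 */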
		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

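	/* There is nothing to test if the GPU is already terminally wedged */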
	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}