1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #include "gem/i915_gem_pm.h"
8 
9 #include "i915_selftest.h"
10 
11 #include "igt_flush_test.h"
12 #include "lib_sw_fence.h"
13 
/*
 * Wrapper around i915_active so the selftests can observe when the
 * retirement callback has fired.
 */
struct live_active {
	struct i915_active base; /* tracker under test; embedded so container_of() works */
	bool retired; /* set true by __live_active_retire() when base retires */
};
18 
19 static void __live_active_retire(struct i915_active *base)
20 {
21 	struct live_active *active = container_of(base, typeof(*active), base);
22 
23 	active->retired = true;
24 }
25 
26 static int __live_active_setup(struct drm_i915_private *i915,
27 			       struct live_active *active)
28 {
29 	struct intel_engine_cs *engine;
30 	struct i915_sw_fence *submit;
31 	enum intel_engine_id id;
32 	unsigned int count = 0;
33 	int err = 0;
34 
35 	submit = heap_fence_create(GFP_KERNEL);
36 	if (!submit)
37 		return -ENOMEM;
38 
39 	i915_active_init(i915, &active->base, __live_active_retire);
40 	active->retired = false;
41 
42 	if (!i915_active_acquire(&active->base)) {
43 		pr_err("First i915_active_acquire should report being idle\n");
44 		err = -EINVAL;
45 		goto out;
46 	}
47 
48 	for_each_engine(engine, i915, id) {
49 		struct i915_request *rq;
50 
51 		rq = i915_request_create(engine->kernel_context);
52 		if (IS_ERR(rq)) {
53 			err = PTR_ERR(rq);
54 			break;
55 		}
56 
57 		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
58 						       submit,
59 						       GFP_KERNEL);
60 		if (err >= 0)
61 			err = i915_active_ref(&active->base,
62 					      rq->fence.context, rq);
63 		i915_request_add(rq);
64 		if (err) {
65 			pr_err("Failed to track active ref!\n");
66 			break;
67 		}
68 
69 		count++;
70 	}
71 
72 	i915_active_release(&active->base);
73 	if (active->retired && count) {
74 		pr_err("i915_active retired before submission!\n");
75 		err = -EINVAL;
76 	}
77 	if (active->base.count != count) {
78 		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
79 		       active->base.count, count);
80 		err = -EINVAL;
81 	}
82 
83 out:
84 	i915_sw_fence_commit(submit);
85 	heap_fence_put(submit);
86 
87 	return err;
88 }
89 
/*
 * Selftest: verify that the retirement callback fires when we explicitly
 * wait on an i915_active whose tracked requests then retire.
 */
static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active active;
	intel_wakeref_t wakeref;
	int err;

	/* Check that we get a callback when requests retire upon waiting */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	err = __live_active_setup(i915, &active);

	/*
	 * NOTE(review): if __live_active_setup() fails before reaching
	 * i915_active_init() (heap_fence_create() returning -ENOMEM),
	 * active.base is used uninitialised here and in fini below —
	 * confirm against the setup path.
	 */
	i915_active_wait(&active.base);
	if (!active.retired) {
		pr_err("i915_active not retired after waiting!\n");
		err = -EINVAL; /* may overwrite an earlier setup error */
	}

	i915_active_fini(&active.base);
	/* Drain any residual work; a wedged/blocked GPU fails the test. */
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
118 
/*
 * Selftest: verify that the retirement callback fires when the tracked
 * requests are retired indirectly (via a global flush), without an
 * explicit i915_active_wait().
 */
static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active active;
	intel_wakeref_t wakeref;
	int err;

	/* Check that we get a callback when requests are indirectly retired */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	err = __live_active_setup(i915, &active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	if (!active.retired) {
		pr_err("i915_active not retired after flushing!\n");
		err = -EINVAL; /* may overwrite an earlier setup error */
	}

	/*
	 * NOTE(review): if __live_active_setup() failed before reaching
	 * i915_active_init() (heap_fence_create() -ENOMEM), active.base
	 * is uninitialised here — confirm against the setup path.
	 */
	i915_active_fini(&active.base);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
147 
148 int i915_active_live_selftests(struct drm_i915_private *i915)
149 {
150 	static const struct i915_subtest tests[] = {
151 		SUBTEST(live_active_wait),
152 		SUBTEST(live_active_retire),
153 	};
154 
155 	if (i915_terminally_wedged(i915))
156 		return 0;
157 
158 	return i915_subtests(tests, i915);
159 }
160