1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #include <linux/kref.h>
8 
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_gt.h"
11 
12 #include "i915_selftest.h"
13 
14 #include "igt_flush_test.h"
15 #include "lib_sw_fence.h"
16 
/*
 * Test fixture: an i915_active with its own refcount so the test can
 * observe when the retirement callback has run.
 */
struct live_active {
	struct i915_active base;	/* the active tracker under test */
	struct kref ref;		/* test's own lifetime; last put frees */
	bool retired;			/* set by __live_retire() */
};
22 
/* Take an extra reference on the test fixture. */
static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}
27 
/*
 * Tear down the fixture: finalise the i915_active (pairs with
 * i915_active_init() in __live_alloc()) before freeing the memory.
 */
static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}
33 
/* kref release callback: recover the fixture and free it. */
static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}
40 
/* Drop a reference; frees the fixture via __live_release() on last put. */
static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}
45 
/*
 * i915_active acquire callback (first use): pin the fixture so it
 * survives until the matching retire callback drops the reference.
 */
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}
53 
/*
 * i915_active retire callback (last tracked request completed):
 * record retirement for the test, then drop the acquire reference.
 * The flag is set before the put as the put may free the fixture.
 */
static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}
61 
62 static struct live_active *__live_alloc(struct drm_i915_private *i915)
63 {
64 	struct live_active *active;
65 
66 	active = kzalloc(sizeof(*active), GFP_KERNEL);
67 	if (!active)
68 		return NULL;
69 
70 	kref_init(&active->ref);
71 	i915_active_init(i915, &active->base, __live_active, __live_retire);
72 
73 	return active;
74 }
75 
76 static struct live_active *
77 __live_active_setup(struct drm_i915_private *i915)
78 {
79 	struct intel_engine_cs *engine;
80 	struct i915_sw_fence *submit;
81 	struct live_active *active;
82 	enum intel_engine_id id;
83 	unsigned int count = 0;
84 	int err = 0;
85 
86 	active = __live_alloc(i915);
87 	if (!active)
88 		return ERR_PTR(-ENOMEM);
89 
90 	submit = heap_fence_create(GFP_KERNEL);
91 	if (!submit) {
92 		kfree(active);
93 		return ERR_PTR(-ENOMEM);
94 	}
95 
96 	err = i915_active_acquire(&active->base);
97 	if (err)
98 		goto out;
99 
100 	for_each_engine(engine, i915, id) {
101 		struct i915_request *rq;
102 
103 		rq = i915_request_create(engine->kernel_context);
104 		if (IS_ERR(rq)) {
105 			err = PTR_ERR(rq);
106 			break;
107 		}
108 
109 		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
110 						       submit,
111 						       GFP_KERNEL);
112 		if (err >= 0)
113 			err = i915_active_ref(&active->base,
114 					      rq->fence.context, rq);
115 		i915_request_add(rq);
116 		if (err) {
117 			pr_err("Failed to track active ref!\n");
118 			break;
119 		}
120 
121 		count++;
122 	}
123 
124 	i915_active_release(&active->base);
125 	if (active->retired && count) {
126 		pr_err("i915_active retired before submission!\n");
127 		err = -EINVAL;
128 	}
129 	if (atomic_read(&active->base.count) != count) {
130 		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
131 		       atomic_read(&active->base.count), count);
132 		err = -EINVAL;
133 	}
134 
135 out:
136 	i915_sw_fence_commit(submit);
137 	heap_fence_put(submit);
138 	if (err) {
139 		__live_put(active);
140 		active = ERR_PTR(err);
141 	}
142 
143 	return active;
144 }
145 
146 static int live_active_wait(void *arg)
147 {
148 	struct drm_i915_private *i915 = arg;
149 	struct live_active *active;
150 	intel_wakeref_t wakeref;
151 	int err = 0;
152 
153 	/* Check that we get a callback when requests retire upon waiting */
154 
155 	mutex_lock(&i915->drm.struct_mutex);
156 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
157 
158 	active = __live_active_setup(i915);
159 	if (IS_ERR(active)) {
160 		err = PTR_ERR(active);
161 		goto err;
162 	}
163 
164 	i915_active_wait(&active->base);
165 	if (!active->retired) {
166 		pr_err("i915_active not retired after waiting!\n");
167 		err = -EINVAL;
168 	}
169 
170 	__live_put(active);
171 
172 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
173 		err = -EIO;
174 
175 err:
176 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
177 	mutex_unlock(&i915->drm.struct_mutex);
178 
179 	return err;
180 }
181 
182 static int live_active_retire(void *arg)
183 {
184 	struct drm_i915_private *i915 = arg;
185 	struct live_active *active;
186 	intel_wakeref_t wakeref;
187 	int err = 0;
188 
189 	/* Check that we get a callback when requests are indirectly retired */
190 
191 	mutex_lock(&i915->drm.struct_mutex);
192 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
193 
194 	active = __live_active_setup(i915);
195 	if (IS_ERR(active)) {
196 		err = PTR_ERR(active);
197 		goto err;
198 	}
199 
200 	/* waits for & retires all requests */
201 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
202 		err = -EIO;
203 
204 	if (!active->retired) {
205 		pr_err("i915_active not retired after flushing!\n");
206 		err = -EINVAL;
207 	}
208 
209 	__live_put(active);
210 
211 err:
212 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
213 	mutex_unlock(&i915->drm.struct_mutex);
214 
215 	return err;
216 }
217 
/*
 * Entry point for the i915_active live selftests. Skipped (returns 0)
 * when the GPU is already wedged, as no requests could be submitted.
 */
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}
230