/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

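/*
 * Test fixture: an i915_active whose retirement is observed via the
 * ->retired flag. The structure is refcounted so that the active and
 * retire callbacks below can pin it for as long as requests are tracked.
 */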
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

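/*
 * i915_active callbacks: take a reference when the active is first used
 * and drop it, noting retirement, once all tracked requests have retired.
 */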
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

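/* Allocate a live_active with a single reference held by the caller. */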
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire);

	return active;
}

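/*
 * Build a live_active tracking one request per engine. Every request is
 * gated behind a single sw_fence so that none can be submitted (and so
 * none can retire) until we have finished adding them all; the fence is
 * committed just before returning, leaving the caller to wait for
 * retirement.
 */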
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	enum intel_engine_id id;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		__live_free(active); /* pair with i915_active_init() before freeing */
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

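/*
 * i915_active_wait() flushes all tracked requests, so by the time it
 * returns the retire callback must have fired.
 */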
static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	i915_active_wait(&active->base);
	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after waiting!\n");
		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

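/*
 * Here retirement is driven indirectly by idling the GPU through
 * igt_flush_test() rather than by waiting on the i915_active itself.
 */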
static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after flushing!\n");
		err = -EINVAL;
	}

	__live_put(active);

	return err;
}

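/* Live selftest entry point; skipped if the GPU is already wedged. */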
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}