1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2019 Intel Corporation
4 */
5
6 #include <linux/list.h>
7 #include <linux/list_sort.h>
8 #include <linux/llist.h>
9
10 #include "i915_drv.h"
11 #include "intel_engine.h"
12 #include "intel_engine_user.h"
13 #include "intel_gt.h"
14 #include "uc/intel_guc_submission.h"
15
16 struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private * i915,u8 class,u8 instance)17 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
18 {
19 struct rb_node *p = i915->uabi_engines.rb_node;
20
21 while (p) {
22 struct intel_engine_cs *it =
23 rb_entry(p, typeof(*it), uabi_node);
24
25 if (class < it->uabi_class)
26 p = p->rb_left;
27 else if (class > it->uabi_class ||
28 instance > it->uabi_instance)
29 p = p->rb_right;
30 else if (instance < it->uabi_instance)
31 p = p->rb_left;
32 else
33 return it;
34 }
35
36 return NULL;
37 }
38
intel_engine_add_user(struct intel_engine_cs * engine)39 void intel_engine_add_user(struct intel_engine_cs *engine)
40 {
41 llist_add((struct llist_node *)&engine->uabi_node,
42 (struct llist_head *)&engine->i915->uabi_engines);
43 }
44
/* Sentinel uabi class for engines that are not exposed to userspace. */
#define I915_NO_UABI_CLASS ((u16)(-1))

/* Map the internal engine class onto the class reported to userspace. */
static const u16 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
	[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
55
engine_cmp(void * priv,const struct list_head * A,const struct list_head * B)56 static int engine_cmp(void *priv, const struct list_head *A,
57 const struct list_head *B)
58 {
59 const struct intel_engine_cs *a =
60 container_of((struct rb_node *)A, typeof(*a), uabi_node);
61 const struct intel_engine_cs *b =
62 container_of((struct rb_node *)B, typeof(*b), uabi_node);
63
64 if (uabi_classes[a->class] < uabi_classes[b->class])
65 return -1;
66 if (uabi_classes[a->class] > uabi_classes[b->class])
67 return 1;
68
69 if (a->instance < b->instance)
70 return -1;
71 if (a->instance > b->instance)
72 return 1;
73
74 return 0;
75 }
76
/* Atomically take ownership of all engines queued by intel_engine_add_user(). */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}
81
sort_engines(struct drm_i915_private * i915,struct list_head * engines)82 static void sort_engines(struct drm_i915_private *i915,
83 struct list_head *engines)
84 {
85 struct llist_node *pos, *next;
86
87 llist_for_each_safe(pos, next, get_engines(i915)) {
88 struct intel_engine_cs *engine =
89 container_of((struct rb_node *)pos, typeof(*engine),
90 uabi_node);
91 list_add((struct list_head *)&engine->uabi_node, engines);
92 }
93 list_sort(NULL, engines, engine_cmp);
94 }
95
/*
 * Compute the scheduler capabilities advertised to userspace in
 * i915->caps.scheduler. A capability bit is only reported if every uabi
 * engine supports it: support accumulates in @enabled, any lack of
 * support in @disabled, and the final mask is enabled & ~disabled.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	/* Pair each engine feature bit with the scheduler cap it implies. */
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		/* GuC submission implies a static priority->queue mapping. */
		if (intel_uc_uses_guc_submission(&engine->gt->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	/* Without a scheduler on every engine, report no capabilities. */
	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
138
/**
 * intel_engine_class_repr - user-facing abbreviation for an engine class
 * @class: internal engine class
 *
 * Returns the short name used to construct engine names ("rcs", "bcs",
 * ...), or "xxx" for a class with no known representation.
 */
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
		[OTHER_CLASS] = "other",
		[COMPUTE_CLASS] = "ccs",
	};
	const char *name = NULL;

	if (class < ARRAY_SIZE(uabi_names))
		name = uabi_names[class];

	return name ? name : "xxx";
}
155
/*
 * Tracks the current (gt, class) run while assigning legacy ring
 * indices; @instance counts how many engines of that run have been
 * assigned so far.
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};
161
/*
 * Translate a (class, instance) pair into the legacy engine index used
 * by the default execbuf mapping, or INVALID_ENGINE if the class has no
 * legacy index or the instance exceeds the legacy per-class limit.
 */
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	/* Base legacy index and maximum instance count per engine class. */
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}
182
add_legacy_ring(struct legacy_ring * ring,struct intel_engine_cs * engine)183 static void add_legacy_ring(struct legacy_ring *ring,
184 struct intel_engine_cs *engine)
185 {
186 if (engine->gt != ring->gt || engine->class != ring->class) {
187 ring->gt = engine->gt;
188 ring->class = engine->class;
189 ring->instance = 0;
190 }
191
192 engine->legacy_idx = legacy_ring_idx(ring);
193 if (engine->legacy_idx != INVALID_ENGINE)
194 ring->instance++;
195 }
196
/* Replace the internal engine name with "<name><instance>", logging the change. */
static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
	char prev[sizeof(engine->name)];

	/* Keep a copy of the old name so the rename can be logged. */
	memcpy(prev, engine->name, sizeof(prev));
	scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
	drm_dbg(&engine->i915->drm, "renamed %s to %s\n", prev, engine->name);
}
205
/*
 * Publish all initialised engines to userspace: drain the pending llist,
 * sort the engines into uabi order, assign their user-visible names and
 * (class, instance) ids, and build the uabi rbtree used by
 * intel_engine_lookup_user(). Finally derive the scheduler caps.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	u16 name_instance, other_instance = 0;
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	/*
	 * The list is already sorted, so each engine is inserted as the
	 * rightmost child of the previous node, building the rbtree in
	 * order without searching.
	 */
	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];
		if (engine->uabi_class == I915_NO_UABI_CLASS) {
			/* Hidden engines get their own instance numbering. */
			name_instance = other_instance++;
		} else {
			GEM_BUG_ON(engine->uabi_class >=
				   ARRAY_SIZE(i915->engine_uabi_class_count));
			name_instance =
				i915->engine_uabi_class_count[engine->uabi_class]++;
		}
		engine->uabi_instance = name_instance;

		/*
		 * Replace the internal name with the final user and log facing
		 * name.
		 */
		engine_rename(engine,
			      intel_engine_class_repr(engine->class),
			      name_instance);

		/* Engines without a uabi class are not exposed via the tree. */
		if (engine->uabi_class == I915_NO_UABI_CLASS)
			continue;

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		/* The freshly inserted engine must be findable again. */
		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	/* Debug builds: cross-check the tree against the instance counts. */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every counted (class, instance) pair must resolve correctly. */
		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, withdraw all engines from userspace. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
317
intel_engines_has_context_isolation(struct drm_i915_private * i915)318 unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
319 {
320 struct intel_engine_cs *engine;
321 unsigned int which;
322
323 which = 0;
324 for_each_uabi_engine(engine, i915)
325 if (engine->default_state)
326 which |= BIT(engine->uabi_class);
327
328 return which;
329 }
330