/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"

/*
 * Registration of engines under their userspace-visible (uabi) identity.
 *
 * During probe, engines are pushed one at a time onto a lock-free llist
 * (intel_engine_add_user).  At driver registration time the llist is
 * drained, sorted into uabi order, and rebuilt as an rb-tree keyed on
 * {uabi_class, uabi_instance} for O(log n) lookup from the ioctl paths.
 *
 * NOTE(review): the casts below between struct llist_node, struct
 * list_head and struct rb_node all reuse the storage of
 * engine->uabi_node / i915->uabi_engines for each phase in turn.  This
 * presumes the node member is large/aligned enough for every view —
 * relies on the declarations in intel_engine_types.h / i915_drv.h;
 * confirm there before touching any of the casts.
 */

/*
 * Find the engine registered under the given userspace class/instance
 * pair, or NULL if none.  Binary search of the uabi rb-tree: ordered
 * first by uabi_class, then by uabi_instance.
 */
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}

/*
 * Queue an engine for later exposure to userspace.  Lock-free so it can
 * be called from engine init without serialisation; the accumulated list
 * is consumed (and ordered) by intel_engines_driver_register().
 */
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	/* Reuse the (currently idle) rb_node storage as an llist link. */
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}

/* Map internal engine class (RENDER_CLASS etc.) to the uapi enum value. */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};

/*
 * list_sort() comparator: order engines by uabi class, then by hardware
 * instance.  The list_head pointers are really the uabi_node storage
 * (see sort_engines), hence the rb_node-based container_of.
 */
static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of((struct rb_node *)A, typeof(*a), uabi_node);
	const struct intel_engine_cs *b =
		container_of((struct rb_node *)B, typeof(*b), uabi_node);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}

/* Atomically take ownership of every engine queued so far. */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}

/*
 * Drain the lock-free llist of engines into a regular list_head chain
 * (again reusing the uabi_node storage) and sort it into uabi order.
 */
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}

/*
 * Compute the scheduler capabilities we can advertise to userspace.
 * A capability is only reported if *every* uabi engine supports it
 * (enabled & ~disabled); a partially supported capability would be
 * unusable through the uniform uapi.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	/* Pairs of (engine flag bit, scheduler capability bit). */
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	/* Without the base ENABLED cap, the finer-grained caps mean nothing. */
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}

/*
 * Short userspace-facing name prefix for an engine class ("rcs", "bcs",
 * "vcs", "vecs"); "xxx" for anything out of range or unnamed.
 */
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}

/*
 * Running state used while assigning legacy (pre-engine-discovery)
 * execbuf ring indices: counts instances per (gt, class) run.
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

/*
 * Translate a (class, instance) pair into the fixed legacy ring id
 * (RCS0, BCS0, VCS0.., VECS0..) expected by execbuf's user_map, or
 * INVALID_ENGINE if the pair has no legacy slot.
 */
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	/* Per-class base id and number of legacy slots available. */
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}

/*
 * Assign the next legacy ring index to @engine.  The engines arrive in
 * sorted order (see sort_engines), so a change of gt or class starts a
 * new instance count from 0.
 */
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}

/*
 * Publish all queued engines to userspace: assign each its uabi
 * class/instance and final name, insert it into the uabi rb-tree, and
 * wire up the legacy execbuf mapping.  Under the debug configs, also
 * self-check that the resulting mapping is complete and consistent.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	/* Next instance number per uabi class (indexed by uabi_class). */
	u8 uabi_instances[4] = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		/*
		 * The list is already sorted, so every node is appended as
		 * the rightmost descendant of the previous insertion; the
		 * tree is built directly without per-node comparisons.
		 */
		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/*
		 * Verify every {class, instance} we handed out above can be
		 * found again via the rb-tree, and maps back to itself.
		 */
		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* A broken mapping is unusable: withdraw all uabi engines. */
		if (WARN(errors, "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}

/*
 * Return a bitmask of uabi classes in which at least one engine carries
 * a saved default context state (engine->default_state).
 */
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}