// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}
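
/*
 * Illustrative (hypothetical) caller: once intel_engines_driver_register()
 * below has built the rb-tree, a userspace { class, instance } pair such as
 * { I915_ENGINE_CLASS_VIDEO, 1 } resolves to the engine named "vcs1":
 *
 *	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 1);
 *	if (!engine)
 *		return -ENOENT;
 */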

void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}
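
/*
 * Note on the casts: the storage of engine->uabi_node is reused across three
 * representations. Engines are first collected lock-free on an llist (here),
 * then spliced onto a list_head so that list_sort() can order them (see
 * sort_engines() below), and finally linked into the i915->uabi_engines
 * rb-tree walked by intel_engine_lookup_user() and for_each_uabi_engine().
 * Only one representation is live at any time.
 */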

static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
};

static int engine_cmp(void *priv, const struct list_head *A,
		      const struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of((struct rb_node *)A, typeof(*a), uabi_node);
	const struct intel_engine_cs *b =
		container_of((struct rb_node *)B, typeof(*b), uabi_node);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}

static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}

static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}

static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
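
/*
 * The exported i915->caps.scheduler is therefore the intersection over all
 * uabi engines: a scheduler capability is only advertised when no engine
 * disabled it, and if basic scheduling is unavailable the whole mask is
 * cleared.
 */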

const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
		[OTHER_CLASS] = "other",
		[COMPUTE_CLASS] = "ccs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}
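
/*
 * Example: combined with the per-class counters in
 * i915->engine_uabi_class_count, this yields the final user facing names,
 * e.g. "rcs0", "bcs0", "vcs0", "vcs1", "vecs0", "ccs0".
 */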

struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}

static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}
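
/*
 * Example: on a platform exposing two video decode engines, add_legacy_ring()
 * assigns legacy_idx VCS0 to the first and VCS1 to the second, preserving the
 * ring numbering expected by the legacy execbuf uAPI (see the
 * execbuf::user_map[] note in intel_engines_driver_register() below).
 */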

static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
	char old[sizeof(engine->name)];

	memcpy(old, engine->name, sizeof(engine->name));
	scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
	drm_dbg(&engine->i915->drm, "renamed %s to %s\n", old, engine->name);
}

void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		/*
		 * We don't want to expose the GSC engine to the users, but we
		 * still rename it so it is easier to identify in the debug logs
		 */
		if (engine->id == GSC0) {
			engine_rename(engine, "gsc", 0);
			continue;
		}

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		GEM_BUG_ON(engine->uabi_class >=
			   ARRAY_SIZE(i915->engine_uabi_class_count));
		engine->uabi_instance =
			i915->engine_uabi_class_count[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		engine_rename(engine,
			      intel_engine_class_repr(engine->class),
			      engine->uabi_instance);

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}