// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += sysfs_emit_at(buf, len, "[%x] ", n);
		} else {
			len += sysfs_emit_at(buf, len, "%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
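
/*
 * Example output (illustrative only; the exact strings depend on the engine
 * class and platform): reading "capabilities" for a video decode engine that
 * exposes both features above would return "hevc sfc", while
 * "known_capabilities" lists every capability name the driver could report
 * for that class, regardless of what the hardware actually supports.
 */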

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completed in less time than it would take
	 * to go to sleep, process the interrupt and return to the client,
	 * then we have saved the client some latency, albeit at the cost
	 * of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
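
/*
 * Example usage from userspace (hypothetical paths, assuming the i915 device
 * is card0 and the engine is rcs0; the card index varies per system):
 *
 *   cat /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *   echo 4000 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *
 * Values that intel_clamp_max_busywait_duration_ns() would alter are rejected
 * with -EINVAL rather than silently clamped; the built-in value remains
 * readable from .defaults/max_busywait_duration_ns.
 */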

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only if they are of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
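
/*
 * Sketch of how the timeslice knob behaves, per the store handler above:
 * a value that intel_clamp_timeslice_duration_ms() would alter is rejected
 * with -EINVAL; otherwise the new duration takes effect immediately, and the
 * execlists timer is rearmed if the engine is currently active. For example
 * (hypothetical path, assuming card0/rcs0):
 *
 *   echo 1 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */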

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_stop_timeout_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
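
/*
 * Example (hypothetical path, assuming card0/rcs0): allow up to 100ms for
 * in-flight work to settle after submission is disabled, before forcing the
 * reset:
 *
 *   echo 100 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 *
 * As with the other tunables, values rejected by intel_clamp_stop_timeout_ms()
 * return -EINVAL to the writer.
 */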

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout, clamped;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
	if (timeout != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
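
/*
 * Example (hypothetical path, assuming card0/rcs0): give the currently
 * resident context 640ms to honour a preemption request before the engine is
 * deemed unresponsive and reset:
 *
 *   echo 640 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 *
 * If a preemption is already pending when the value is written, the running
 * preempt timer is rearmed with the new timeout.
 */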

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay, clamped;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
	if (delay != clamped)
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
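
/*
 * Example (hypothetical path, assuming card0/rcs0): check for a missed
 * heartbeat every 2500ms, or write 0 to disable heartbeats on this engine
 * where the engine allows it:
 *
 *   echo 2500 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 *   echo 0 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 *
 * Note that intel_engine_set_heartbeat() may refuse the new interval, in
 * which case its error is propagated back to the writer.
 */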

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute * const files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}
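
/*
 * Resulting sysfs layout (illustrative; the card index and engine names
 * depend on the platform and which engines are exposed):
 *
 *   /sys/class/drm/card0/engine/<engine>/
 *       name, class, instance, mmio_base
 *       capabilities, known_capabilities
 *       max_busywait_duration_ns, stop_timeout_ms
 *       timeslice_duration_ms, preempt_timeout_ms   (where supported)
 *       heartbeat_interval_ms                       (if configured)
 *       .defaults/                                  (read-only built-in values)
 */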

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
	static const struct attribute * const files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
}