// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

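/* Map a sysfs kobject back to the engine it wraps. */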
static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

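/*
 * Human-readable names for the uabi capability bits, indexed by
 * ilog2() of the capability flag.
 */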
static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

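/*
 * Print the capability names set in @caps as a space-separated list.
 * With @show_unknown, bits without a name are still reported as "[%x]"
 * (and flag a debug warning) so new hardware capabilities are visible.
 */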
static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

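/*
 * Report every capability name known for this engine class by passing
 * an all-ones mask with show_unknown disabled.
 */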
static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);

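/*
 * Illustrative use (path assumes the first card, e.g. from a shell):
 *   echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 * A value of 0 is expected to disable busywaiting so that waiters go
 * straight to sleep.
 */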
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if one is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would take
	 * to go to sleep, process the interrupt and return back to the client,
	 * then we have saved the client some latency, albeit at the cost
	 * of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: a trade-off between power and latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

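/*
 * Each writable tunable is mirrored by a read-only copy of its boot-time
 * value under the engine's .defaults/ directory, so userspace can always
 * restore the original setting after experimenting.
 */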
static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

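/*
 * Note: updating timeslice_duration_ms also reprograms a running
 * timeslice timer, so a shorter quantum takes effect without waiting
 * for the current slice to expire.
 */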
static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only those of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_stop_timeout_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

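/*
 * Note: like the timeslice knob, a new preempt_timeout_ms is applied to
 * an in-flight preemption request by reprogramming the preempt timer.
 */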
static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout, clamped;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
	if (timeout != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

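/*
 * Note: writing 0 is expected to disable the heartbeat entirely, at the
 * cost of automatic hang detection on this engine.
 */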
static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay, clamped;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
	if (delay != clamped)
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

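/* Engine kobjects are dynamically allocated, so free them on release. */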
static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

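/*
 * Populate a hidden .defaults/ subdirectory with read-only copies of the
 * engine's original tunables. Failures here are not fatal: the engine
 * directory remains usable, just without the corresponding default.
 */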
static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute *files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}

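/*
 * Expose the uabi engines under the DRM card device, e.g. (paths
 * illustrative, for the first card):
 *
 *   /sys/class/drm/card0/engine/rcs0/name
 *   /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 *   /sys/class/drm/card0/engine/rcs0/.defaults/heartbeat_interval_ms
 */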
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
}