v6.9.4
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2021 Intel Corporation
  4 */
  5
  6#include "xe_exec_queue.h"
  7
  8#include <linux/nospec.h>
  9
 10#include <drm/drm_device.h>
 11#include <drm/drm_file.h>
 12#include <drm/xe_drm.h>
 13
 14#include "xe_device.h"
 15#include "xe_gt.h"
 16#include "xe_hw_engine_class_sysfs.h"
 17#include "xe_hw_fence.h"
 18#include "xe_lrc.h"
 19#include "xe_macros.h"
 20#include "xe_migrate.h"
 21#include "xe_pm.h"
 22#include "xe_ring_ops_types.h"
 23#include "xe_trace.h"
 24#include "xe_vm.h"
 25
 26enum xe_exec_queue_sched_prop {
 27	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
 28	XE_EXEC_QUEUE_TIMESLICE = 1,
 29	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
 30	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
 31};
 32
 33static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
 34				      u64 extensions, int ext_number, bool create);
 35
 36static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
 37						   struct xe_vm *vm,
 38						   u32 logical_mask,
 39						   u16 width, struct xe_hw_engine *hwe,
 40						   u32 flags, u64 extensions)
 41{
 42	struct xe_exec_queue *q;
 43	struct xe_gt *gt = hwe->gt;
 44	int err;
 45
 46	/* only kernel queues can be permanent */
 47	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
 48
 49	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
 50	if (!q)
 51		return ERR_PTR(-ENOMEM);
 52
 53	kref_init(&q->refcount);
 54	q->flags = flags;
 55	q->hwe = hwe;
 56	q->gt = gt;
 57	q->class = hwe->class;
 58	q->width = width;
 59	q->logical_mask = logical_mask;
 60	q->fence_irq = &gt->fence_irq[hwe->class];
 61	q->ring_ops = gt->ring_ops[hwe->class];
 62	q->ops = gt->exec_queue_ops;
 63	INIT_LIST_HEAD(&q->compute.link);
 64	INIT_LIST_HEAD(&q->multi_gt_link);
 65
 66	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
 67	q->sched_props.preempt_timeout_us =
 68				hwe->eclass->sched_props.preempt_timeout_us;
 69	q->sched_props.job_timeout_ms =
 70				hwe->eclass->sched_props.job_timeout_ms;
 71	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
 72	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
 73		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
 74	else
 75		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 76
 77	if (extensions) {
 78		/*
 79		 * may set q->usm, must come before xe_lrc_init(),
 80		 * may overwrite q->sched_props, must come before q->ops->init()
 81		 */
 82		err = exec_queue_user_extensions(xe, q, extensions, 0, true);
 83		if (err) {
 84			kfree(q);
 85			return ERR_PTR(err);
 86		}
 87	}
 88
 89	if (vm)
 90		q->vm = xe_vm_get(vm);
 91
 92	if (xe_exec_queue_is_parallel(q)) {
 93		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
 94		q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
 95	}
 96
 97	return q;
 98}
 99
100static void __xe_exec_queue_free(struct xe_exec_queue *q)
101{
102	if (q->vm)
103		xe_vm_put(q->vm);
104	kfree(q);
105}
106
107static int __xe_exec_queue_init(struct xe_exec_queue *q)
108{
109	struct xe_device *xe = gt_to_xe(q->gt);
110	int i, err;
111
112	for (i = 0; i < q->width; ++i) {
113		err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
114		if (err)
115			goto err_lrc;
116	}
117
118	err = q->ops->init(q);
119	if (err)
120		goto err_lrc;
121
122	/*
123	 * Normally the user vm holds an rpm ref to keep the device
124	 * awake, and the context holds a ref for the vm, however for
125	 * some engines we use the kernels migrate vm underneath which offers no
126	 * such rpm ref, or we lack a vm. Make sure we keep a ref here, so we
127	 * can perform GuC CT actions when needed. Caller is expected to have
128	 * already grabbed the rpm ref outside any sensitive locks.
129	 */
130	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
131		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
132
133	return 0;
134
135err_lrc:
136	for (i = i - 1; i >= 0; --i)
137		xe_lrc_finish(q->lrc + i);
138	return err;
139}
140
141struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
142					   u32 logical_mask, u16 width,
143					   struct xe_hw_engine *hwe, u32 flags,
144					   u64 extensions)
145{
146	struct xe_exec_queue *q;
147	int err;
148
149	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
150				  extensions);
151	if (IS_ERR(q))
152		return q;
153
154	if (vm) {
155		err = xe_vm_lock(vm, true);
156		if (err)
157			goto err_post_alloc;
158	}
159
160	err = __xe_exec_queue_init(q);
161	if (vm)
162		xe_vm_unlock(vm);
163	if (err)
164		goto err_post_alloc;
165
166	return q;
167
168err_post_alloc:
169	__xe_exec_queue_free(q);
170	return ERR_PTR(err);
171}
172
173struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
174						 struct xe_vm *vm,
175						 enum xe_engine_class class, u32 flags)
176{
177	struct xe_hw_engine *hwe, *hwe0 = NULL;
178	enum xe_hw_engine_id id;
179	u32 logical_mask = 0;
180
181	for_each_hw_engine(hwe, gt, id) {
182		if (xe_hw_engine_is_reserved(hwe))
183			continue;
184
185		if (hwe->class == class) {
186			logical_mask |= BIT(hwe->logical_instance);
187			if (!hwe0)
188				hwe0 = hwe;
189		}
190	}
191
192	if (!logical_mask)
193		return ERR_PTR(-ENODEV);
194
195	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
196}
197
198void xe_exec_queue_destroy(struct kref *ref)
199{
200	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
201	struct xe_exec_queue *eq, *next;
202
203	xe_exec_queue_last_fence_put_unlocked(q);
204	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
205		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
206					 multi_gt_link)
207			xe_exec_queue_put(eq);
208	}
209
210	q->ops->fini(q);
211}
212
213void xe_exec_queue_fini(struct xe_exec_queue *q)
214{
215	int i;
216
217	for (i = 0; i < q->width; ++i)
218		xe_lrc_finish(q->lrc + i);
219	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
220		xe_device_mem_access_put(gt_to_xe(q->gt));
221	__xe_exec_queue_free(q);
222}
223
224void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
225{
226	switch (q->class) {
227	case XE_ENGINE_CLASS_RENDER:
228		sprintf(q->name, "rcs%d", instance);
229		break;
230	case XE_ENGINE_CLASS_VIDEO_DECODE:
231		sprintf(q->name, "vcs%d", instance);
232		break;
233	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
234		sprintf(q->name, "vecs%d", instance);
235		break;
236	case XE_ENGINE_CLASS_COPY:
237		sprintf(q->name, "bcs%d", instance);
238		break;
239	case XE_ENGINE_CLASS_COMPUTE:
240		sprintf(q->name, "ccs%d", instance);
241		break;
242	case XE_ENGINE_CLASS_OTHER:
243		sprintf(q->name, "gsccs%d", instance);
244		break;
245	default:
246		XE_WARN_ON(q->class);
247	}
248}
249
250struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
251{
252	struct xe_exec_queue *q;
253
254	mutex_lock(&xef->exec_queue.lock);
255	q = xa_load(&xef->exec_queue.xa, id);
256	if (q)
257		xe_exec_queue_get(q);
258	mutex_unlock(&xef->exec_queue.lock);
259
260	return q;
261}
262
263enum xe_exec_queue_priority
264xe_exec_queue_device_get_max_priority(struct xe_device *xe)
265{
266	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
267				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
268}
269
270static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
271				   u64 value, bool create)
272{
273	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
274		return -EINVAL;
275
276	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
277		return -EPERM;
278
279	if (!create)
280		return q->ops->set_priority(q, value);
281
282	q->sched_props.priority = value;
283	return 0;
284}
285
286static bool xe_exec_queue_enforce_schedule_limit(void)
287{
288#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
289	return true;
290#else
291	return !capable(CAP_SYS_NICE);
292#endif
293}
294
295static void
296xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
297			      enum xe_exec_queue_sched_prop prop,
298			      u32 *min, u32 *max)
299{
300	switch (prop) {
301	case XE_EXEC_QUEUE_JOB_TIMEOUT:
302		*min = eclass->sched_props.job_timeout_min;
303		*max = eclass->sched_props.job_timeout_max;
304		break;
305	case XE_EXEC_QUEUE_TIMESLICE:
306		*min = eclass->sched_props.timeslice_min;
307		*max = eclass->sched_props.timeslice_max;
308		break;
309	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
310		*min = eclass->sched_props.preempt_timeout_min;
311		*max = eclass->sched_props.preempt_timeout_max;
312		break;
313	default:
314		break;
315	}
316#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
317	if (capable(CAP_SYS_NICE)) {
318		switch (prop) {
319		case XE_EXEC_QUEUE_JOB_TIMEOUT:
320			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
321			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
322			break;
323		case XE_EXEC_QUEUE_TIMESLICE:
324			*min = XE_HW_ENGINE_TIMESLICE_MIN;
325			*max = XE_HW_ENGINE_TIMESLICE_MAX;
326			break;
327		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
328			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
329			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
330			break;
331		default:
332			break;
333		}
334	}
335#endif
336}
337
338static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
339				    u64 value, bool create)
340{
341	u32 min = 0, max = 0;
342
343	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
344				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);
345
346	if (xe_exec_queue_enforce_schedule_limit() &&
347	    !xe_hw_engine_timeout_in_range(value, min, max))
348		return -EINVAL;
349
350	if (!create)
351		return q->ops->set_timeslice(q, value);
352
353	q->sched_props.timeslice_us = value;
354	return 0;
355}
356
357typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
358					     struct xe_exec_queue *q,
359					     u64 value, bool create);
360
361static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
362	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
363	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
364};
365
366static int exec_queue_user_ext_set_property(struct xe_device *xe,
367					    struct xe_exec_queue *q,
368					    u64 extension,
369					    bool create)
370{
371	u64 __user *address = u64_to_user_ptr(extension);
372	struct drm_xe_ext_set_property ext;
373	int err;
374	u32 idx;
375
376	err = __copy_from_user(&ext, address, sizeof(ext));
377	if (XE_IOCTL_DBG(xe, err))
378		return -EFAULT;
379
380	if (XE_IOCTL_DBG(xe, ext.property >=
381			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
382	    XE_IOCTL_DBG(xe, ext.pad) ||
383	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
384			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
385		return -EINVAL;
386
387	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
388	if (!exec_queue_set_property_funcs[idx])
389		return -EINVAL;
390
391	return exec_queue_set_property_funcs[idx](xe, q, ext.value,  create);
392}
393
394typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
395					       struct xe_exec_queue *q,
396					       u64 extension,
397					       bool create);
398
399static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
400	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
401};
402
403#define MAX_USER_EXTENSIONS	16
404static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
405				      u64 extensions, int ext_number, bool create)
406{
407	u64 __user *address = u64_to_user_ptr(extensions);
408	struct drm_xe_user_extension ext;
409	int err;
410	u32 idx;
411
412	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
413		return -E2BIG;
414
415	err = __copy_from_user(&ext, address, sizeof(ext));
416	if (XE_IOCTL_DBG(xe, err))
417		return -EFAULT;
418
419	if (XE_IOCTL_DBG(xe, ext.pad) ||
420	    XE_IOCTL_DBG(xe, ext.name >=
421			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
422		return -EINVAL;
423
424	idx = array_index_nospec(ext.name,
425				 ARRAY_SIZE(exec_queue_user_extension_funcs));
426	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
427	if (XE_IOCTL_DBG(xe, err))
428		return err;
429
430	if (ext.next_extension)
431		return exec_queue_user_extensions(xe, q, ext.next_extension,
432					      ++ext_number, create);
433
434	return 0;
435}
436
437static const enum xe_engine_class user_to_xe_engine_class[] = {
438	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
439	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
440	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
441	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
442	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
443};
444
445static struct xe_hw_engine *
446find_hw_engine(struct xe_device *xe,
447	       struct drm_xe_engine_class_instance eci)
448{
449	u32 idx;
450
451	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
452		return NULL;
453
454	if (eci.gt_id >= xe->info.gt_count)
455		return NULL;
456
457	idx = array_index_nospec(eci.engine_class,
458				 ARRAY_SIZE(user_to_xe_engine_class));
459
460	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
461			       user_to_xe_engine_class[idx],
462			       eci.engine_instance, true);
463}
464
465static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
466					struct drm_xe_engine_class_instance *eci,
467					u16 width, u16 num_placements)
468{
469	struct xe_hw_engine *hwe;
470	enum xe_hw_engine_id id;
471	u32 logical_mask = 0;
472
473	if (XE_IOCTL_DBG(xe, width != 1))
474		return 0;
475	if (XE_IOCTL_DBG(xe, num_placements != 1))
476		return 0;
477	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
478		return 0;
479
480	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
481
482	for_each_hw_engine(hwe, gt, id) {
483		if (xe_hw_engine_is_reserved(hwe))
484			continue;
485
486		if (hwe->class ==
487		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
488			logical_mask |= BIT(hwe->logical_instance);
489	}
490
491	return logical_mask;
492}
493
494static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
495				      struct drm_xe_engine_class_instance *eci,
496				      u16 width, u16 num_placements)
497{
498	int len = width * num_placements;
499	int i, j, n;
500	u16 class;
501	u16 gt_id;
502	u32 return_mask = 0, prev_mask;
503
504	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
505			 len > 1))
506		return 0;
507
508	for (i = 0; i < width; ++i) {
509		u32 current_mask = 0;
510
511		for (j = 0; j < num_placements; ++j) {
512			struct xe_hw_engine *hwe;
513
514			n = j * width + i;
515
516			hwe = find_hw_engine(xe, eci[n]);
517			if (XE_IOCTL_DBG(xe, !hwe))
518				return 0;
519
520			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
521				return 0;
522
523			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
524			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
525				return 0;
526
527			class = eci[n].engine_class;
528			gt_id = eci[n].gt_id;
529
530			if (width == 1 || !i)
531				return_mask |= BIT(eci[n].engine_instance);
532			current_mask |= BIT(eci[n].engine_instance);
533		}
534
535		/* Parallel submissions must be logically contiguous */
536		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
537			return 0;
538
539		prev_mask = current_mask;
540	}
541
542	return return_mask;
543}
544
545int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
546			       struct drm_file *file)
547{
548	struct xe_device *xe = to_xe_device(dev);
549	struct xe_file *xef = to_xe_file(file);
550	struct drm_xe_exec_queue_create *args = data;
551	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
552	struct drm_xe_engine_class_instance __user *user_eci =
553		u64_to_user_ptr(args->instances);
554	struct xe_hw_engine *hwe;
555	struct xe_vm *vm, *migrate_vm;
556	struct xe_gt *gt;
557	struct xe_exec_queue *q = NULL;
558	u32 logical_mask;
559	u32 id;
560	u32 len;
561	int err;
562
563	if (XE_IOCTL_DBG(xe, args->flags) ||
564	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
565		return -EINVAL;
566
567	len = args->width * args->num_placements;
568	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
569		return -EINVAL;
570
571	err = __copy_from_user(eci, user_eci,
572			       sizeof(struct drm_xe_engine_class_instance) *
573			       len);
574	if (XE_IOCTL_DBG(xe, err))
575		return -EFAULT;
576
577	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
578		return -EINVAL;
579
580	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
581		for_each_gt(gt, xe, id) {
582			struct xe_exec_queue *new;
583			u32 flags;
584
585			if (xe_gt_is_media_type(gt))
586				continue;
587
588			eci[0].gt_id = gt->info.id;
589			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
590								    args->width,
591								    args->num_placements);
592			if (XE_IOCTL_DBG(xe, !logical_mask))
593				return -EINVAL;
594
595			hwe = find_hw_engine(xe, eci[0]);
596			if (XE_IOCTL_DBG(xe, !hwe))
597				return -EINVAL;
598
599			/* The migration vm doesn't hold rpm ref */
600			xe_device_mem_access_get(xe);
601
602			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
603
604			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
605			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
606						   args->width, hwe, flags,
607						   args->extensions);
608
609			xe_device_mem_access_put(xe); /* now held by engine */
610
611			xe_vm_put(migrate_vm);
612			if (IS_ERR(new)) {
613				err = PTR_ERR(new);
614				if (q)
615					goto put_exec_queue;
616				return err;
617			}
618			if (id == 0)
619				q = new;
620			else
621				list_add_tail(&new->multi_gt_list,
622					      &q->multi_gt_link);
623		}
624	} else {
625		gt = xe_device_get_gt(xe, eci[0].gt_id);
626		logical_mask = calc_validate_logical_mask(xe, gt, eci,
627							  args->width,
628							  args->num_placements);
629		if (XE_IOCTL_DBG(xe, !logical_mask))
630			return -EINVAL;
631
632		hwe = find_hw_engine(xe, eci[0]);
633		if (XE_IOCTL_DBG(xe, !hwe))
634			return -EINVAL;
635
636		vm = xe_vm_lookup(xef, args->vm_id);
637		if (XE_IOCTL_DBG(xe, !vm))
638			return -ENOENT;
639
640		err = down_read_interruptible(&vm->lock);
641		if (err) {
642			xe_vm_put(vm);
643			return err;
644		}
645
646		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
647			up_read(&vm->lock);
648			xe_vm_put(vm);
649			return -ENOENT;
650		}
651
652		q = xe_exec_queue_create(xe, vm, logical_mask,
653					 args->width, hwe, 0,
654					 args->extensions);
655		up_read(&vm->lock);
656		xe_vm_put(vm);
657		if (IS_ERR(q))
658			return PTR_ERR(q);
659
660		if (xe_vm_in_preempt_fence_mode(vm)) {
661			q->compute.context = dma_fence_context_alloc(1);
662			spin_lock_init(&q->compute.lock);
663
664			err = xe_vm_add_compute_exec_queue(vm, q);
665			if (XE_IOCTL_DBG(xe, err))
666				goto put_exec_queue;
667		}
668	}
669
670	mutex_lock(&xef->exec_queue.lock);
671	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
672	mutex_unlock(&xef->exec_queue.lock);
673	if (err)
674		goto kill_exec_queue;
675
676	args->exec_queue_id = id;
677
678	return 0;
679
680kill_exec_queue:
681	xe_exec_queue_kill(q);
682put_exec_queue:
683	xe_exec_queue_put(q);
684	return err;
685}
686
687int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
688				     struct drm_file *file)
689{
690	struct xe_device *xe = to_xe_device(dev);
691	struct xe_file *xef = to_xe_file(file);
692	struct drm_xe_exec_queue_get_property *args = data;
693	struct xe_exec_queue *q;
694	int ret;
695
696	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
697		return -EINVAL;
698
699	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
700	if (XE_IOCTL_DBG(xe, !q))
701		return -ENOENT;
702
703	switch (args->property) {
704	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
705		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
706		ret = 0;
707		break;
708	default:
709		ret = -EINVAL;
710	}
711
712	xe_exec_queue_put(q);
713
714	return ret;
715}
716
717/**
718 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
719 * @q: The exec_queue
720 *
721 * Return: True if the exec_queue is long-running, false otherwise.
722 */
723bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
724{
725	return q->vm && xe_vm_in_lr_mode(q->vm) &&
726		!(q->flags & EXEC_QUEUE_FLAG_VM);
727}
728
729static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
730{
731	return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
732}
733
734/**
735 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
736 * @q: The exec_queue
737 *
738 * Return: True if the exec_queue's ring is full, false otherwise.
739 */
740bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
741{
742	struct xe_lrc *lrc = q->lrc;
743	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
744
745	return xe_exec_queue_num_job_inflight(q) >= max_job;
746}
747
748/**
749 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
750 * @q: The exec_queue
751 *
752 * FIXME: Need to determine what to use as the short-lived
753 * timeline lock for the exec_queues, so that the return value
754 * of this function becomes more than just an advisory
755 * snapshot in time. The timeline lock must protect the
756 * seqno from racing submissions on the same exec_queue.
757 * Typically vm->resv, but user-created timeline locks use the migrate vm
758 * and never grabs the migrate vm->resv so we have a race there.
759 *
760 * Return: True if the exec_queue is idle, false otherwise.
761 */
762bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
763{
764	if (xe_exec_queue_is_parallel(q)) {
765		int i;
766
767		for (i = 0; i < q->width; ++i) {
768			if (xe_lrc_seqno(&q->lrc[i]) !=
769			    q->lrc[i].fence_ctx.next_seqno - 1)
770				return false;
771		}
772
773		return true;
774	}
775
776	return xe_lrc_seqno(&q->lrc[0]) ==
777		q->lrc[0].fence_ctx.next_seqno - 1;
778}
779
780void xe_exec_queue_kill(struct xe_exec_queue *q)
781{
782	struct xe_exec_queue *eq = q, *next;
783
784	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
785				 multi_gt_link) {
786		q->ops->kill(eq);
787		xe_vm_remove_compute_exec_queue(q->vm, eq);
788	}
789
790	q->ops->kill(q);
791	xe_vm_remove_compute_exec_queue(q->vm, q);
792}
793
794int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
795				struct drm_file *file)
796{
797	struct xe_device *xe = to_xe_device(dev);
798	struct xe_file *xef = to_xe_file(file);
799	struct drm_xe_exec_queue_destroy *args = data;
800	struct xe_exec_queue *q;
801
802	if (XE_IOCTL_DBG(xe, args->pad) ||
803	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
804		return -EINVAL;
805
806	mutex_lock(&xef->exec_queue.lock);
807	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
808	mutex_unlock(&xef->exec_queue.lock);
809	if (XE_IOCTL_DBG(xe, !q))
810		return -ENOENT;
811
812	xe_exec_queue_kill(q);
813
814	trace_xe_exec_queue_close(q);
815	xe_exec_queue_put(q);
816
817	return 0;
818}
819
820static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
821						    struct xe_vm *vm)
822{
823	if (q->flags & EXEC_QUEUE_FLAG_VM)
824		lockdep_assert_held(&vm->lock);
825	else
826		xe_vm_assert_held(vm);
827}
828
829/**
830 * xe_exec_queue_last_fence_put() - Drop ref to last fence
831 * @q: The exec queue
832 * @vm: The VM the engine does a bind or exec for
833 */
834void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
835{
836	xe_exec_queue_last_fence_lockdep_assert(q, vm);
837
838	if (q->last_fence) {
839		dma_fence_put(q->last_fence);
840		q->last_fence = NULL;
841	}
842}
843
844/**
845 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
846 * @q: The exec queue
847 *
848 * Only safe to be called from xe_exec_queue_destroy().
849 */
850void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
851{
852	if (q->last_fence) {
853		dma_fence_put(q->last_fence);
854		q->last_fence = NULL;
855	}
856}
857
858/**
859 * xe_exec_queue_last_fence_get() - Get last fence
860 * @q: The exec queue
861 * @vm: The VM the engine does a bind or exec for
862 *
863 * Get last fence, takes a ref
864 *
865 * Returns: last fence if not signaled, dma fence stub if signaled
866 */
867struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
868					       struct xe_vm *vm)
869{
870	struct dma_fence *fence;
871
872	xe_exec_queue_last_fence_lockdep_assert(q, vm);
873
874	if (q->last_fence &&
875	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
876		xe_exec_queue_last_fence_put(q, vm);
877
878	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
879	dma_fence_get(fence);
880	return fence;
881}
882
883/**
884 * xe_exec_queue_last_fence_set() - Set last fence
885 * @q: The exec queue
886 * @vm: The VM the engine does a bind or exec for
887 * @fence: The fence
888 *
889 * Set the last fence for the engine. Increases reference count for fence, when
890 * closing engine xe_exec_queue_last_fence_put should be called.
891 */
892void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
893				  struct dma_fence *fence)
894{
895	xe_exec_queue_last_fence_lockdep_assert(q, vm);
896
897	xe_exec_queue_last_fence_put(q, vm);
898	q->last_fence = dma_fence_get(fence);
899}
v6.8
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2021 Intel Corporation
  4 */
  5
  6#include "xe_exec_queue.h"
  7
  8#include <linux/nospec.h>
  9
 10#include <drm/drm_device.h>
 11#include <drm/drm_file.h>
 12#include <drm/xe_drm.h>
 13
 14#include "xe_device.h"
 15#include "xe_gt.h"
 16#include "xe_hw_engine_class_sysfs.h"
 17#include "xe_hw_fence.h"
 18#include "xe_lrc.h"
 19#include "xe_macros.h"
 20#include "xe_migrate.h"
 21#include "xe_pm.h"
 22#include "xe_ring_ops_types.h"
 23#include "xe_trace.h"
 24#include "xe_vm.h"
 25
 26enum xe_exec_queue_sched_prop {
 27	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
 28	XE_EXEC_QUEUE_TIMESLICE = 1,
 29	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
 30	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
 31};
 32
 33static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
 34						    struct xe_vm *vm,
 35						    u32 logical_mask,
 36						    u16 width, struct xe_hw_engine *hwe,
 37						    u32 flags)
 38{
 39	struct xe_exec_queue *q;
 40	struct xe_gt *gt = hwe->gt;
 41	int err;
 42	int i;
 43
 44	/* only kernel queues can be permanent */
 45	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
 46
 47	q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
 48	if (!q)
 49		return ERR_PTR(-ENOMEM);
 50
 51	kref_init(&q->refcount);
 52	q->flags = flags;
 53	q->hwe = hwe;
 54	q->gt = gt;
 55	if (vm)
 56		q->vm = xe_vm_get(vm);
 57	q->class = hwe->class;
 58	q->width = width;
 59	q->logical_mask = logical_mask;
 60	q->fence_irq = &gt->fence_irq[hwe->class];
 61	q->ring_ops = gt->ring_ops[hwe->class];
 62	q->ops = gt->exec_queue_ops;
 63	INIT_LIST_HEAD(&q->compute.link);
 64	INIT_LIST_HEAD(&q->multi_gt_link);
 65
 66	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
 67	q->sched_props.preempt_timeout_us =
 68				hwe->eclass->sched_props.preempt_timeout_us;
 69	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
 70	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
 71		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
 72	else
 73		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 74
 75	if (xe_exec_queue_is_parallel(q)) {
 76		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
 77		q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
 78	}
 79	if (q->flags & EXEC_QUEUE_FLAG_VM) {
 80		q->bind.fence_ctx = dma_fence_context_alloc(1);
 81		q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
 82	}
 83
 84	for (i = 0; i < width; ++i) {
 85		err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
 86		if (err)
 87			goto err_lrc;
 88	}
 89
 90	err = q->ops->init(q);
 91	if (err)
 92		goto err_lrc;
 93
 94	/*
 95	 * Normally the user vm holds an rpm ref to keep the device
 96	 * awake, and the context holds a ref for the vm, however for
 97	 * some engines we use the kernels migrate vm underneath which offers no
 98	 * such rpm ref, or we lack a vm. Make sure we keep a ref here, so we
 99	 * can perform GuC CT actions when needed. Caller is expected to have
100	 * already grabbed the rpm ref outside any sensitive locks.
101	 */
102	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
103		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
104
105	return q;
106
107err_lrc:
108	for (i = i - 1; i >= 0; --i)
109		xe_lrc_finish(q->lrc + i);
110	kfree(q);
111	return ERR_PTR(err);
112}
113
114struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
115					   u32 logical_mask, u16 width,
116					   struct xe_hw_engine *hwe, u32 flags)
117{
118	struct xe_exec_queue *q;
119	int err;
120
121	if (vm) {
122		err = xe_vm_lock(vm, true);
123		if (err)
124			return ERR_PTR(err);
125	}
126	q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
127	if (vm)
128		xe_vm_unlock(vm);
129
130	return q;
131}
132
133struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
134						 struct xe_vm *vm,
135						 enum xe_engine_class class, u32 flags)
136{
137	struct xe_hw_engine *hwe, *hwe0 = NULL;
138	enum xe_hw_engine_id id;
139	u32 logical_mask = 0;
140
141	for_each_hw_engine(hwe, gt, id) {
142		if (xe_hw_engine_is_reserved(hwe))
143			continue;
144
145		if (hwe->class == class) {
146			logical_mask |= BIT(hwe->logical_instance);
147			if (!hwe0)
148				hwe0 = hwe;
149		}
150	}
151
152	if (!logical_mask)
153		return ERR_PTR(-ENODEV);
154
155	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
156}
157
158void xe_exec_queue_destroy(struct kref *ref)
159{
160	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
161	struct xe_exec_queue *eq, *next;
162
163	xe_exec_queue_last_fence_put_unlocked(q);
164	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
165		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
166					 multi_gt_link)
167			xe_exec_queue_put(eq);
168	}
169
170	q->ops->fini(q);
171}
172
173void xe_exec_queue_fini(struct xe_exec_queue *q)
174{
175	int i;
176
177	for (i = 0; i < q->width; ++i)
178		xe_lrc_finish(q->lrc + i);
179	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
180		xe_device_mem_access_put(gt_to_xe(q->gt));
181	if (q->vm)
182		xe_vm_put(q->vm);
183
184	kfree(q);
185}
186
187void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
188{
189	switch (q->class) {
190	case XE_ENGINE_CLASS_RENDER:
191		sprintf(q->name, "rcs%d", instance);
192		break;
193	case XE_ENGINE_CLASS_VIDEO_DECODE:
194		sprintf(q->name, "vcs%d", instance);
195		break;
196	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
197		sprintf(q->name, "vecs%d", instance);
198		break;
199	case XE_ENGINE_CLASS_COPY:
200		sprintf(q->name, "bcs%d", instance);
201		break;
202	case XE_ENGINE_CLASS_COMPUTE:
203		sprintf(q->name, "ccs%d", instance);
204		break;
205	case XE_ENGINE_CLASS_OTHER:
206		sprintf(q->name, "gsccs%d", instance);
207		break;
208	default:
209		XE_WARN_ON(q->class);
210	}
211}
212
213struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
214{
215	struct xe_exec_queue *q;
216
217	mutex_lock(&xef->exec_queue.lock);
218	q = xa_load(&xef->exec_queue.xa, id);
219	if (q)
220		xe_exec_queue_get(q);
221	mutex_unlock(&xef->exec_queue.lock);
222
223	return q;
224}
225
226enum xe_exec_queue_priority
227xe_exec_queue_device_get_max_priority(struct xe_device *xe)
228{
229	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
230				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
231}
232
233static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
234				   u64 value, bool create)
235{
236	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
237		return -EINVAL;
238
239	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
240		return -EPERM;
241
242	return q->ops->set_priority(q, value);
243}
244
245static bool xe_exec_queue_enforce_schedule_limit(void)
246{
247#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
248	return true;
249#else
250	return !capable(CAP_SYS_NICE);
251#endif
252}
253
254static void
255xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
256			      enum xe_exec_queue_sched_prop prop,
257			      u32 *min, u32 *max)
258{
259	switch (prop) {
260	case XE_EXEC_QUEUE_JOB_TIMEOUT:
261		*min = eclass->sched_props.job_timeout_min;
262		*max = eclass->sched_props.job_timeout_max;
263		break;
264	case XE_EXEC_QUEUE_TIMESLICE:
265		*min = eclass->sched_props.timeslice_min;
266		*max = eclass->sched_props.timeslice_max;
267		break;
268	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
269		*min = eclass->sched_props.preempt_timeout_min;
270		*max = eclass->sched_props.preempt_timeout_max;
271		break;
272	default:
273		break;
274	}
275#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
276	if (capable(CAP_SYS_NICE)) {
277		switch (prop) {
278		case XE_EXEC_QUEUE_JOB_TIMEOUT:
279			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
280			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
281			break;
282		case XE_EXEC_QUEUE_TIMESLICE:
283			*min = XE_HW_ENGINE_TIMESLICE_MIN;
284			*max = XE_HW_ENGINE_TIMESLICE_MAX;
285			break;
286		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
287			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
288			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
289			break;
290		default:
291			break;
292		}
293	}
294#endif
295}
296
297static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
298				    u64 value, bool create)
299{
300	u32 min = 0, max = 0;
301
302	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
303				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);
304
305	if (xe_exec_queue_enforce_schedule_limit() &&
306	    !xe_hw_engine_timeout_in_range(value, min, max))
307		return -EINVAL;
308
309	return q->ops->set_timeslice(q, value);
310}
311
312typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
313					     struct xe_exec_queue *q,
314					     u64 value, bool create);
315
316static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
317	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
318	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
319};
320
321static int exec_queue_user_ext_set_property(struct xe_device *xe,
322					    struct xe_exec_queue *q,
323					    u64 extension,
324					    bool create)
325{
326	u64 __user *address = u64_to_user_ptr(extension);
327	struct drm_xe_ext_set_property ext;
328	int err;
329	u32 idx;
330
331	err = __copy_from_user(&ext, address, sizeof(ext));
332	if (XE_IOCTL_DBG(xe, err))
333		return -EFAULT;
334
335	if (XE_IOCTL_DBG(xe, ext.property >=
336			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
337	    XE_IOCTL_DBG(xe, ext.pad) ||
338	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
339			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
340		return -EINVAL;
341
342	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
343	if (!exec_queue_set_property_funcs[idx])
344		return -EINVAL;
345
346	return exec_queue_set_property_funcs[idx](xe, q, ext.value,  create);
347}
348
349typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
350					       struct xe_exec_queue *q,
351					       u64 extension,
352					       bool create);
353
354static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
355	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
356};
357
358#define MAX_USER_EXTENSIONS	16
359static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
360				      u64 extensions, int ext_number, bool create)
361{
362	u64 __user *address = u64_to_user_ptr(extensions);
363	struct drm_xe_user_extension ext;
364	int err;
365	u32 idx;
366
367	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
368		return -E2BIG;
369
370	err = __copy_from_user(&ext, address, sizeof(ext));
371	if (XE_IOCTL_DBG(xe, err))
372		return -EFAULT;
373
374	if (XE_IOCTL_DBG(xe, ext.pad) ||
375	    XE_IOCTL_DBG(xe, ext.name >=
376			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
377		return -EINVAL;
378
379	idx = array_index_nospec(ext.name,
380				 ARRAY_SIZE(exec_queue_user_extension_funcs));
381	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
382	if (XE_IOCTL_DBG(xe, err))
383		return err;
384
385	if (ext.next_extension)
386		return exec_queue_user_extensions(xe, q, ext.next_extension,
387					      ++ext_number, create);
388
389	return 0;
390}
391
392static const enum xe_engine_class user_to_xe_engine_class[] = {
393	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
394	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
395	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
396	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
397	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
398};
399
400static struct xe_hw_engine *
401find_hw_engine(struct xe_device *xe,
402	       struct drm_xe_engine_class_instance eci)
403{
404	u32 idx;
405
406	if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
407		return NULL;
408
409	if (eci.gt_id >= xe->info.gt_count)
410		return NULL;
411
412	idx = array_index_nospec(eci.engine_class,
413				 ARRAY_SIZE(user_to_xe_engine_class));
414
415	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
416			       user_to_xe_engine_class[idx],
417			       eci.engine_instance, true);
418}
419
420static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
421					struct drm_xe_engine_class_instance *eci,
422					u16 width, u16 num_placements)
423{
424	struct xe_hw_engine *hwe;
425	enum xe_hw_engine_id id;
426	u32 logical_mask = 0;
427
428	if (XE_IOCTL_DBG(xe, width != 1))
429		return 0;
430	if (XE_IOCTL_DBG(xe, num_placements != 1))
431		return 0;
432	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
433		return 0;
434
435	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
436
437	for_each_hw_engine(hwe, gt, id) {
438		if (xe_hw_engine_is_reserved(hwe))
439			continue;
440
441		if (hwe->class ==
442		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
443			logical_mask |= BIT(hwe->logical_instance);
444	}
445
446	return logical_mask;
447}
448
449static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
450				      struct drm_xe_engine_class_instance *eci,
451				      u16 width, u16 num_placements)
452{
453	int len = width * num_placements;
454	int i, j, n;
455	u16 class;
456	u16 gt_id;
457	u32 return_mask = 0, prev_mask;
458
459	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
460			 len > 1))
461		return 0;
462
463	for (i = 0; i < width; ++i) {
464		u32 current_mask = 0;
465
466		for (j = 0; j < num_placements; ++j) {
467			struct xe_hw_engine *hwe;
468
469			n = j * width + i;
470
471			hwe = find_hw_engine(xe, eci[n]);
472			if (XE_IOCTL_DBG(xe, !hwe))
473				return 0;
474
475			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
476				return 0;
477
478			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
479			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
480				return 0;
481
482			class = eci[n].engine_class;
483			gt_id = eci[n].gt_id;
484
485			if (width == 1 || !i)
486				return_mask |= BIT(eci[n].engine_instance);
487			current_mask |= BIT(eci[n].engine_instance);
488		}
489
490		/* Parallel submissions must be logically contiguous */
491		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
492			return 0;
493
494		prev_mask = current_mask;
495	}
496
497	return return_mask;
498}
499
500int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
501			       struct drm_file *file)
502{
503	struct xe_device *xe = to_xe_device(dev);
504	struct xe_file *xef = to_xe_file(file);
505	struct drm_xe_exec_queue_create *args = data;
506	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
507	struct drm_xe_engine_class_instance __user *user_eci =
508		u64_to_user_ptr(args->instances);
509	struct xe_hw_engine *hwe;
510	struct xe_vm *vm, *migrate_vm;
511	struct xe_gt *gt;
512	struct xe_exec_queue *q = NULL;
513	u32 logical_mask;
514	u32 id;
515	u32 len;
516	int err;
517
518	if (XE_IOCTL_DBG(xe, args->flags) ||
519	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
520		return -EINVAL;
521
522	len = args->width * args->num_placements;
523	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
524		return -EINVAL;
525
526	err = __copy_from_user(eci, user_eci,
527			       sizeof(struct drm_xe_engine_class_instance) *
528			       len);
529	if (XE_IOCTL_DBG(xe, err))
530		return -EFAULT;
531
532	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
533		return -EINVAL;
534
535	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
536		for_each_gt(gt, xe, id) {
537			struct xe_exec_queue *new;
538
539			if (xe_gt_is_media_type(gt))
540				continue;
541
542			eci[0].gt_id = gt->info.id;
543			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
544								    args->width,
545								    args->num_placements);
546			if (XE_IOCTL_DBG(xe, !logical_mask))
547				return -EINVAL;
548
549			hwe = find_hw_engine(xe, eci[0]);
550			if (XE_IOCTL_DBG(xe, !hwe))
551				return -EINVAL;
552
553			/* The migration vm doesn't hold rpm ref */
554			xe_device_mem_access_get(xe);
555
556			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
557			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
558						   args->width, hwe,
559						   EXEC_QUEUE_FLAG_PERSISTENT |
560						   EXEC_QUEUE_FLAG_VM |
561						   (id ?
562						    EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
563						    0));
564
565			xe_device_mem_access_put(xe); /* now held by engine */
566
567			xe_vm_put(migrate_vm);
568			if (IS_ERR(new)) {
569				err = PTR_ERR(new);
570				if (q)
571					goto put_exec_queue;
572				return err;
573			}
574			if (id == 0)
575				q = new;
576			else
577				list_add_tail(&new->multi_gt_list,
578					      &q->multi_gt_link);
579		}
580	} else {
581		gt = xe_device_get_gt(xe, eci[0].gt_id);
582		logical_mask = calc_validate_logical_mask(xe, gt, eci,
583							  args->width,
584							  args->num_placements);
585		if (XE_IOCTL_DBG(xe, !logical_mask))
586			return -EINVAL;
587
588		hwe = find_hw_engine(xe, eci[0]);
589		if (XE_IOCTL_DBG(xe, !hwe))
590			return -EINVAL;
591
592		vm = xe_vm_lookup(xef, args->vm_id);
593		if (XE_IOCTL_DBG(xe, !vm))
594			return -ENOENT;
595
596		err = down_read_interruptible(&vm->lock);
597		if (err) {
598			xe_vm_put(vm);
599			return err;
600		}
601
602		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
603			up_read(&vm->lock);
604			xe_vm_put(vm);
605			return -ENOENT;
606		}
607
608		q = xe_exec_queue_create(xe, vm, logical_mask,
609					 args->width, hwe, 0);
610		up_read(&vm->lock);
611		xe_vm_put(vm);
612		if (IS_ERR(q))
613			return PTR_ERR(q);
614
615		if (xe_vm_in_preempt_fence_mode(vm)) {
616			q->compute.context = dma_fence_context_alloc(1);
617			spin_lock_init(&q->compute.lock);
618
619			err = xe_vm_add_compute_exec_queue(vm, q);
620			if (XE_IOCTL_DBG(xe, err))
621				goto put_exec_queue;
622		}
623	}
624
625	if (args->extensions) {
626		err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
627		if (XE_IOCTL_DBG(xe, err))
628			goto kill_exec_queue;
629	}
630
631	mutex_lock(&xef->exec_queue.lock);
632	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
633	mutex_unlock(&xef->exec_queue.lock);
634	if (err)
635		goto kill_exec_queue;
636
637	args->exec_queue_id = id;
638
639	return 0;
640
641kill_exec_queue:
642	xe_exec_queue_kill(q);
643put_exec_queue:
644	xe_exec_queue_put(q);
645	return err;
646}
647
648int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
649				     struct drm_file *file)
650{
651	struct xe_device *xe = to_xe_device(dev);
652	struct xe_file *xef = to_xe_file(file);
653	struct drm_xe_exec_queue_get_property *args = data;
654	struct xe_exec_queue *q;
655	int ret;
656
657	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
658		return -EINVAL;
659
660	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
661	if (XE_IOCTL_DBG(xe, !q))
662		return -ENOENT;
663
664	switch (args->property) {
665	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
666		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
667		ret = 0;
668		break;
669	default:
670		ret = -EINVAL;
671	}
672
673	xe_exec_queue_put(q);
674
675	return ret;
676}
677
678/**
679 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
680 * @q: The exec_queue
681 *
682 * Return: True if the exec_queue is long-running, false otherwise.
683 */
684bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
685{
686	return q->vm && xe_vm_in_lr_mode(q->vm) &&
687		!(q->flags & EXEC_QUEUE_FLAG_VM);
688}
689
690static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
691{
692	return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
693}
694
695/**
696 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
697 * @q: The exec_queue
698 *
699 * Return: True if the exec_queue's ring is full, false otherwise.
700 */
701bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
702{
703	struct xe_lrc *lrc = q->lrc;
704	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
705
706	return xe_exec_queue_num_job_inflight(q) >= max_job;
707}
708
709/**
710 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
711 * @q: The exec_queue
712 *
713 * FIXME: Need to determine what to use as the short-lived
714 * timeline lock for the exec_queues, so that the return value
715 * of this function becomes more than just an advisory
716 * snapshot in time. The timeline lock must protect the
717 * seqno from racing submissions on the same exec_queue.
718 * Typically vm->resv, but user-created timeline locks use the migrate vm
719 * and never grabs the migrate vm->resv so we have a race there.
720 *
721 * Return: True if the exec_queue is idle, false otherwise.
722 */
723bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
724{
725	if (xe_exec_queue_is_parallel(q)) {
726		int i;
727
728		for (i = 0; i < q->width; ++i) {
729			if (xe_lrc_seqno(&q->lrc[i]) !=
730			    q->lrc[i].fence_ctx.next_seqno - 1)
731				return false;
732		}
733
734		return true;
735	}
736
737	return xe_lrc_seqno(&q->lrc[0]) ==
738		q->lrc[0].fence_ctx.next_seqno - 1;
739}
740
741void xe_exec_queue_kill(struct xe_exec_queue *q)
742{
743	struct xe_exec_queue *eq = q, *next;
744
745	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
746				 multi_gt_link) {
747		q->ops->kill(eq);
748		xe_vm_remove_compute_exec_queue(q->vm, eq);
749	}
750
751	q->ops->kill(q);
752	xe_vm_remove_compute_exec_queue(q->vm, q);
753}
754
755int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
756				struct drm_file *file)
757{
758	struct xe_device *xe = to_xe_device(dev);
759	struct xe_file *xef = to_xe_file(file);
760	struct drm_xe_exec_queue_destroy *args = data;
761	struct xe_exec_queue *q;
762
763	if (XE_IOCTL_DBG(xe, args->pad) ||
764	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
765		return -EINVAL;
766
767	mutex_lock(&xef->exec_queue.lock);
768	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
769	mutex_unlock(&xef->exec_queue.lock);
770	if (XE_IOCTL_DBG(xe, !q))
771		return -ENOENT;
772
773	xe_exec_queue_kill(q);
774
775	trace_xe_exec_queue_close(q);
776	xe_exec_queue_put(q);
777
778	return 0;
779}
780
781static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
782						    struct xe_vm *vm)
783{
784	if (q->flags & EXEC_QUEUE_FLAG_VM)
785		lockdep_assert_held(&vm->lock);
786	else
787		xe_vm_assert_held(vm);
788}
789
790/**
791 * xe_exec_queue_last_fence_put() - Drop ref to last fence
792 * @q: The exec queue
793 * @vm: The VM the engine does a bind or exec for
794 */
795void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
796{
797	xe_exec_queue_last_fence_lockdep_assert(q, vm);
798
799	if (q->last_fence) {
800		dma_fence_put(q->last_fence);
801		q->last_fence = NULL;
802	}
803}
804
805/**
806 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
807 * @q: The exec queue
808 *
809 * Only safe to be called from xe_exec_queue_destroy().
810 */
811void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
812{
813	if (q->last_fence) {
814		dma_fence_put(q->last_fence);
815		q->last_fence = NULL;
816	}
817}
818
819/**
820 * xe_exec_queue_last_fence_get() - Get last fence
821 * @q: The exec queue
822 * @vm: The VM the engine does a bind or exec for
823 *
824 * Get last fence, takes a ref
825 *
826 * Returns: last fence if not signaled, dma fence stub if signaled
827 */
828struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
829					       struct xe_vm *vm)
830{
831	struct dma_fence *fence;
832
833	xe_exec_queue_last_fence_lockdep_assert(q, vm);
834
835	if (q->last_fence &&
836	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
837		xe_exec_queue_last_fence_put(q, vm);
838
839	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
840	dma_fence_get(fence);
841	return fence;
842}
843
844/**
845 * xe_exec_queue_last_fence_set() - Set last fence
846 * @q: The exec queue
847 * @vm: The VM the engine does a bind or exec for
848 * @fence: The fence
849 *
850 * Set the last fence for the engine. Increases reference count for fence, when
851 * closing engine xe_exec_queue_last_fence_put should be called.
852 */
853void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
854				  struct dma_fence *fence)
855{
856	xe_exec_queue_last_fence_lockdep_assert(q, vm);
857
858	xe_exec_queue_last_fence_put(q, vm);
859	q->last_fence = dma_fence_get(fence);
860}
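
For orientation, here is a minimal userspace sketch of driving xe_exec_queue_create_ioctl() shown in the listings above. It is an illustration under stated assumptions, not an excerpt of any kernel or libdrm source: it assumes the uapi structures in include/uapi/drm/xe_drm.h expose the fields this file dereferences (width, num_placements, vm_id, instances, extensions, exec_queue_id; engine_class, engine_instance, gt_id), that DRM_IOCTL_XE_EXEC_QUEUE_CREATE is the matching ioctl number, and that libdrm's drmIoctl() and header paths are available.

/*
 * Illustrative only: create a single render exec queue on gt 0 of an
 * already-open xe device fd, bound to a previously created VM. flags,
 * pad and reserved[] are left zero, matching the checks performed by
 * xe_exec_queue_create_ioctl() above.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int xe_create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
{
	struct drm_xe_engine_class_instance eci = {
		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create create;
	int err;

	memset(&create, 0, sizeof(create));
	create.width = 1;		/* engines used per submission */
	create.num_placements = 1;	/* placement candidates per engine */
	create.vm_id = vm_id;
	create.instances = (uintptr_t)&eci;	/* width * num_placements entries */

	err = drmIoctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
	if (err)
		return err;

	*queue_id = create.exec_queue_id;	/* filled in by the kernel */
	return 0;
}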