// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
        XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
        XE_EXEC_QUEUE_TIMESLICE = 1,
        XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
        XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
                                                    struct xe_vm *vm,
                                                    u32 logical_mask,
                                                    u16 width, struct xe_hw_engine *hwe,
                                                    u32 flags)
{
        struct xe_exec_queue *q;
        struct xe_gt *gt = hwe->gt;
        int err;
        int i;

        /* only kernel queues can be permanent */
        XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

        q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);

        kref_init(&q->refcount);
        q->flags = flags;
        q->hwe = hwe;
        q->gt = gt;
        if (vm)
                q->vm = xe_vm_get(vm);
        q->class = hwe->class;
        q->width = width;
        q->logical_mask = logical_mask;
        q->fence_irq = &gt->fence_irq[hwe->class];
        q->ring_ops = gt->ring_ops[hwe->class];
        q->ops = gt->exec_queue_ops;
        INIT_LIST_HEAD(&q->compute.link);
        INIT_LIST_HEAD(&q->multi_gt_link);

        q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
        q->sched_props.preempt_timeout_us =
                                hwe->eclass->sched_props.preempt_timeout_us;
        if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
            q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
        else
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

        if (xe_exec_queue_is_parallel(q)) {
                q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
                q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }
        if (q->flags & EXEC_QUEUE_FLAG_VM) {
                q->bind.fence_ctx = dma_fence_context_alloc(1);
                q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }

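        /*
         * One LRC (with its own 16K ring) is initialized per logical slot of
         * the queue, i.e. one per unit of width.
         */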
        for (i = 0; i < width; ++i) {
                err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
                if (err)
                        goto err_lrc;
        }

        err = q->ops->init(q);
        if (err)
                goto err_lrc;

        /*
         * Normally the user vm holds an rpm ref to keep the device
         * awake, and the context holds a ref for the vm, however for
         * some engines we use the kernel's migrate vm underneath, which offers
         * no such rpm ref, or we lack a vm. Make sure we keep a ref here, so we
         * can perform GuC CT actions when needed. Caller is expected to have
         * already grabbed the rpm ref outside any sensitive locks.
         */
        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
                drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));

        return q;

err_lrc:
        for (i = i - 1; i >= 0; --i)
                xe_lrc_finish(q->lrc + i);
        kfree(q);
        return ERR_PTR(err);
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
                                           u32 logical_mask, u16 width,
                                           struct xe_hw_engine *hwe, u32 flags)
{
        struct xe_exec_queue *q;
        int err;

        if (vm) {
                err = xe_vm_lock(vm, true);
                if (err)
                        return ERR_PTR(err);
        }
        q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
        if (vm)
                xe_vm_unlock(vm);

        return q;
}

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
                                                 struct xe_vm *vm,
                                                 enum xe_engine_class class, u32 flags)
{
        struct xe_hw_engine *hwe, *hwe0 = NULL;
        enum xe_hw_engine_id id;
        u32 logical_mask = 0;

        for_each_hw_engine(hwe, gt, id) {
                if (xe_hw_engine_is_reserved(hwe))
                        continue;

                if (hwe->class == class) {
                        logical_mask |= BIT(hwe->logical_instance);
                        if (!hwe0)
                                hwe0 = hwe;
                }
        }

        if (!logical_mask)
                return ERR_PTR(-ENODEV);

        return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
}

void xe_exec_queue_destroy(struct kref *ref)
{
        struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
        struct xe_exec_queue *eq, *next;

        xe_exec_queue_last_fence_put_unlocked(q);
        if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
                list_for_each_entry_safe(eq, next, &q->multi_gt_list,
                                         multi_gt_link)
                        xe_exec_queue_put(eq);
        }

        q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
        int i;

        for (i = 0; i < q->width; ++i)
                xe_lrc_finish(q->lrc + i);
        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
                xe_device_mem_access_put(gt_to_xe(q->gt));
        if (q->vm)
                xe_vm_put(q->vm);

        kfree(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
        switch (q->class) {
        case XE_ENGINE_CLASS_RENDER:
                sprintf(q->name, "rcs%d", instance);
                break;
        case XE_ENGINE_CLASS_VIDEO_DECODE:
                sprintf(q->name, "vcs%d", instance);
                break;
        case XE_ENGINE_CLASS_VIDEO_ENHANCE:
                sprintf(q->name, "vecs%d", instance);
                break;
        case XE_ENGINE_CLASS_COPY:
                sprintf(q->name, "bcs%d", instance);
                break;
        case XE_ENGINE_CLASS_COMPUTE:
                sprintf(q->name, "ccs%d", instance);
                break;
        case XE_ENGINE_CLASS_OTHER:
                sprintf(q->name, "gsccs%d", instance);
                break;
        default:
                XE_WARN_ON(q->class);
        }
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
        struct xe_exec_queue *q;

        mutex_lock(&xef->exec_queue.lock);
        q = xa_load(&xef->exec_queue.xa, id);
        if (q)
                xe_exec_queue_get(q);
        mutex_unlock(&xef->exec_queue.lock);

        return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
        return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
                                       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

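/*
 * Values above the device maximum (XE_EXEC_QUEUE_PRIORITY_NORMAL unless the
 * caller has CAP_SYS_NICE) are rejected with -EPERM.
 */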
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
                                   u64 value, bool create)
{
        if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
                return -EINVAL;

        if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
                return -EPERM;

        return q->ops->set_priority(q, value);
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
        return true;
#else
        return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
                              enum xe_exec_queue_sched_prop prop,
                              u32 *min, u32 *max)
{
        switch (prop) {
        case XE_EXEC_QUEUE_JOB_TIMEOUT:
                *min = eclass->sched_props.job_timeout_min;
                *max = eclass->sched_props.job_timeout_max;
                break;
        case XE_EXEC_QUEUE_TIMESLICE:
                *min = eclass->sched_props.timeslice_min;
                *max = eclass->sched_props.timeslice_max;
                break;
        case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
                *min = eclass->sched_props.preempt_timeout_min;
                *max = eclass->sched_props.preempt_timeout_max;
                break;
        default:
                break;
        }
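        /*
         * With CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT enabled, privileged
         * callers (CAP_SYS_NICE) are still allowed the full hardware range.
         */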
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
        if (capable(CAP_SYS_NICE)) {
                switch (prop) {
                case XE_EXEC_QUEUE_JOB_TIMEOUT:
                        *min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
                        *max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
                        break;
                case XE_EXEC_QUEUE_TIMESLICE:
                        *min = XE_HW_ENGINE_TIMESLICE_MIN;
                        *max = XE_HW_ENGINE_TIMESLICE_MAX;
                        break;
                case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
                        *min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
                        *max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
                        break;
                default:
                        break;
                }
        }
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
                                    u64 value, bool create)
{
        u32 min = 0, max = 0;

        xe_exec_queue_get_prop_minmax(q->hwe->eclass,
                                      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

        if (xe_exec_queue_enforce_schedule_limit() &&
            !xe_hw_engine_timeout_in_range(value, min, max))
                return -EINVAL;

        return q->ops->set_timeslice(q, value);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
                                             struct xe_exec_queue *q,
                                             u64 value, bool create);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
        [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
        [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
                                            struct xe_exec_queue *q,
                                            u64 extension,
                                            bool create)
{
        u64 __user *address = u64_to_user_ptr(extension);
        struct drm_xe_ext_set_property ext;
        int err;
        u32 idx;

        err = __copy_from_user(&ext, address, sizeof(ext));
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, ext.property >=
                         ARRAY_SIZE(exec_queue_set_property_funcs)) ||
            XE_IOCTL_DBG(xe, ext.pad) ||
            XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
                         ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
                return -EINVAL;

        idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
        if (!exec_queue_set_property_funcs[idx])
                return -EINVAL;

        return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
                                               struct xe_exec_queue *q,
                                               u64 extension,
                                               bool create);

static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
        [DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS 16
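/*
 * User extensions are chained via drm_xe_user_extension.next_extension and
 * processed recursively; chains longer than MAX_USER_EXTENSIONS entries are
 * rejected with -E2BIG.
 */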
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                                      u64 extensions, int ext_number, bool create)
{
        u64 __user *address = u64_to_user_ptr(extensions);
        struct drm_xe_user_extension ext;
        int err;
        u32 idx;

        if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
                return -E2BIG;

        err = __copy_from_user(&ext, address, sizeof(ext));
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, ext.pad) ||
            XE_IOCTL_DBG(xe, ext.name >=
                         ARRAY_SIZE(exec_queue_user_extension_funcs)))
                return -EINVAL;

        idx = array_index_nospec(ext.name,
                                 ARRAY_SIZE(exec_queue_user_extension_funcs));
        err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
        if (XE_IOCTL_DBG(xe, err))
                return err;

        if (ext.next_extension)
                return exec_queue_user_extensions(xe, q, ext.next_extension,
                                                  ++ext_number, create);

        return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
        [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
        [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
        [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
        [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
        [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
               struct drm_xe_engine_class_instance eci)
{
        u32 idx;

        if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
                return NULL;

        if (eci.gt_id >= xe->info.gt_count)
                return NULL;

        idx = array_index_nospec(eci.engine_class,
                                 ARRAY_SIZE(user_to_xe_engine_class));

        return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
                               user_to_xe_engine_class[idx],
                               eci.engine_instance, true);
}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
                                        struct drm_xe_engine_class_instance *eci,
                                        u16 width, u16 num_placements)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        u32 logical_mask = 0;

        if (XE_IOCTL_DBG(xe, width != 1))
                return 0;
        if (XE_IOCTL_DBG(xe, num_placements != 1))
                return 0;
        if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
                return 0;

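        /* Bind queues are always placed on the copy engine class. */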
        eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

        for_each_hw_engine(hwe, gt, id) {
                if (xe_hw_engine_is_reserved(hwe))
                        continue;

                if (hwe->class ==
                    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
                        logical_mask |= BIT(hwe->logical_instance);
        }

        return logical_mask;
}

static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
                                      struct drm_xe_engine_class_instance *eci,
                                      u16 width, u16 num_placements)
{
        int len = width * num_placements;
        int i, j, n;
        u16 class;
        u16 gt_id;
        u32 return_mask = 0, prev_mask;

        if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
                         len > 1))
                return 0;

        for (i = 0; i < width; ++i) {
                u32 current_mask = 0;

                for (j = 0; j < num_placements; ++j) {
                        struct xe_hw_engine *hwe;

                        n = j * width + i;

                        hwe = find_hw_engine(xe, eci[n]);
                        if (XE_IOCTL_DBG(xe, !hwe))
                                return 0;

                        if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
                                return 0;

                        if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
                            XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
                                return 0;

                        class = eci[n].engine_class;
                        gt_id = eci[n].gt_id;

                        if (width == 1 || !i)
                                return_mask |= BIT(eci[n].engine_instance);
                        current_mask |= BIT(eci[n].engine_instance);
                }

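                /*
                 * Example: with width == 2, an i == 0 placement mask of
                 * 0b0001 must be followed by 0b0010 for i == 1; a mask of
                 * 0b0100 would fail the check below.
                 */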
                /* Parallel submissions must be logically contiguous */
                if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
                        return 0;

                prev_mask = current_mask;
        }

        return return_mask;
}

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
        struct drm_xe_exec_queue_create *args = data;
        struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_xe_engine_class_instance __user *user_eci =
                u64_to_user_ptr(args->instances);
        struct xe_hw_engine *hwe;
        struct xe_vm *vm, *migrate_vm;
        struct xe_gt *gt;
        struct xe_exec_queue *q = NULL;
        u32 logical_mask;
        u32 id;
        u32 len;
        int err;

        if (XE_IOCTL_DBG(xe, args->flags) ||
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;

        len = args->width * args->num_placements;
        if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
                return -EINVAL;

        err = __copy_from_user(eci, user_eci,
                               sizeof(struct drm_xe_engine_class_instance) *
                               len);
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
                return -EINVAL;

        if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
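                /*
                 * A bind queue is created on every primary (non-media) GT;
                 * the first one is returned to userspace and any further
                 * ones are linked to it via multi_gt_link.
                 */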
                for_each_gt(gt, xe, id) {
                        struct xe_exec_queue *new;

                        if (xe_gt_is_media_type(gt))
                                continue;

                        eci[0].gt_id = gt->info.id;
                        logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
                                                                    args->width,
                                                                    args->num_placements);
                        if (XE_IOCTL_DBG(xe, !logical_mask))
                                return -EINVAL;

                        hwe = find_hw_engine(xe, eci[0]);
                        if (XE_IOCTL_DBG(xe, !hwe))
                                return -EINVAL;

                        /* The migration vm doesn't hold rpm ref */
                        xe_device_mem_access_get(xe);

                        migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
                        new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
                                                   args->width, hwe,
                                                   EXEC_QUEUE_FLAG_PERSISTENT |
                                                   EXEC_QUEUE_FLAG_VM |
                                                   (id ?
                                                    EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
                                                    0));

                        xe_device_mem_access_put(xe); /* now held by engine */

                        xe_vm_put(migrate_vm);
                        if (IS_ERR(new)) {
                                err = PTR_ERR(new);
                                if (q)
                                        goto put_exec_queue;
                                return err;
                        }
                        if (id == 0)
                                q = new;
                        else
                                list_add_tail(&new->multi_gt_list,
                                              &q->multi_gt_link);
                }
        } else {
                gt = xe_device_get_gt(xe, eci[0].gt_id);
                logical_mask = calc_validate_logical_mask(xe, gt, eci,
                                                          args->width,
                                                          args->num_placements);
                if (XE_IOCTL_DBG(xe, !logical_mask))
                        return -EINVAL;

                hwe = find_hw_engine(xe, eci[0]);
                if (XE_IOCTL_DBG(xe, !hwe))
                        return -EINVAL;

                vm = xe_vm_lookup(xef, args->vm_id);
                if (XE_IOCTL_DBG(xe, !vm))
                        return -ENOENT;

                err = down_read_interruptible(&vm->lock);
                if (err) {
                        xe_vm_put(vm);
                        return err;
                }

                if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
                        up_read(&vm->lock);
                        xe_vm_put(vm);
                        return -ENOENT;
                }

                q = xe_exec_queue_create(xe, vm, logical_mask,
                                         args->width, hwe, 0);
                up_read(&vm->lock);
                xe_vm_put(vm);
                if (IS_ERR(q))
                        return PTR_ERR(q);

                if (xe_vm_in_preempt_fence_mode(vm)) {
                        q->compute.context = dma_fence_context_alloc(1);
                        spin_lock_init(&q->compute.lock);

                        err = xe_vm_add_compute_exec_queue(vm, q);
                        if (XE_IOCTL_DBG(xe, err))
                                goto put_exec_queue;
                }
        }

        if (args->extensions) {
                err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
                if (XE_IOCTL_DBG(xe, err))
                        goto kill_exec_queue;
        }

        mutex_lock(&xef->exec_queue.lock);
        err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
        mutex_unlock(&xef->exec_queue.lock);
        if (err)
                goto kill_exec_queue;

        args->exec_queue_id = id;

        return 0;

kill_exec_queue:
        xe_exec_queue_kill(q);
put_exec_queue:
        xe_exec_queue_put(q);
        return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
        struct drm_xe_exec_queue_get_property *args = data;
        struct xe_exec_queue *q;
        int ret;

        if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;

        q = xe_exec_queue_lookup(xef, args->exec_queue_id);
        if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;

        switch (args->property) {
        case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
                args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }

        xe_exec_queue_put(q);

        return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
        return q->vm && xe_vm_in_lr_mode(q->vm) &&
                !(q->flags & EXEC_QUEUE_FLAG_VM);
}

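/*
 * In-flight jobs: the most recently emitted seqno is next_seqno - 1, and
 * xe_lrc_seqno() is the last seqno the hardware has signalled.
 */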
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
        return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
        struct xe_lrc *lrc = q->lrc;
        s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

        return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
        if (xe_exec_queue_is_parallel(q)) {
                int i;

                for (i = 0; i < q->width; ++i) {
                        if (xe_lrc_seqno(&q->lrc[i]) !=
                            q->lrc[i].fence_ctx.next_seqno - 1)
                                return false;
                }

                return true;
        }

        return xe_lrc_seqno(&q->lrc[0]) ==
                q->lrc[0].fence_ctx.next_seqno - 1;
}

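/*
 * Killing a queue also kills any child queues linked through multi_gt_list
 * (e.g. the per-GT bind queues created for DRM_XE_ENGINE_CLASS_VM_BIND).
 */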
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
        struct xe_exec_queue *eq = q, *next;

        list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
                                 multi_gt_link) {
                q->ops->kill(eq);
                xe_vm_remove_compute_exec_queue(q->vm, eq);
        }

        q->ops->kill(q);
        xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
        struct drm_xe_exec_queue_destroy *args = data;
        struct xe_exec_queue *q;

        if (XE_IOCTL_DBG(xe, args->pad) ||
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;

        mutex_lock(&xef->exec_queue.lock);
        q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
        mutex_unlock(&xef->exec_queue.lock);
        if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;

        xe_exec_queue_kill(q);

        trace_xe_exec_queue_close(q);
        xe_exec_queue_put(q);

        return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
                                                    struct xe_vm *vm)
{
        if (q->flags & EXEC_QUEUE_FLAG_VM)
                lockdep_assert_held(&vm->lock);
        else
                xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        if (q->last_fence) {
                dma_fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
        if (q->last_fence) {
                dma_fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
                                               struct xe_vm *vm)
{
        struct dma_fence *fence;

        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        if (q->last_fence &&
            test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
                xe_exec_queue_last_fence_put(q, vm);

        fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
        dma_fence_get(fence);
        return fence;
}

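/*
 * Illustrative pairing (a sketch, not code from this file): a caller that
 * submits work on a queue typically takes the last fence as a dependency and
 * then records the fence of the new job:
 *
 *     fence = xe_exec_queue_last_fence_get(q, vm);
 *     ... order the new job after fence ...
 *     dma_fence_put(fence);
 *     xe_exec_queue_last_fence_set(q, vm, job_fence);
 *
 * with the locking checked by xe_exec_queue_last_fence_lockdep_assert() held.
 */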
/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Increases the reference count for
 * @fence; when closing the engine, xe_exec_queue_last_fence_put() should be
 * called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
                                  struct dma_fence *fence)
{
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        xe_exec_queue_last_fence_put(q, vm);
        q->last_fence = dma_fence_get(fence);
}