drivers/accel/ivpu/ivpu_job.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2020-2023 Intel Corporation
  4 */
  5
  6#include <drm/drm_file.h>
  7
  8#include <linux/bitfield.h>
  9#include <linux/highmem.h>
 10#include <linux/pci.h>
 11#include <linux/module.h>
 12#include <uapi/drm/ivpu_accel.h>
 13
 14#include "ivpu_drv.h"
 15#include "ivpu_hw.h"
 16#include "ivpu_ipc.h"
 17#include "ivpu_job.h"
 18#include "ivpu_jsm_msg.h"
 19#include "ivpu_pm.h"
 20
 21#define CMD_BUF_IDX	     0
 22#define JOB_ID_JOB_MASK	     GENMASK(7, 0)
 23#define JOB_ID_CONTEXT_MASK  GENMASK(31, 8)
 24#define JOB_MAX_BUFFER_COUNT 65535
 25
 26static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
 27{
 28	ivpu_hw_reg_db_set(vdev, cmdq->db_id);
 29}
 30
 31static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
 32{
 33	struct ivpu_device *vdev = file_priv->vdev;
 34	struct vpu_job_queue_header *jobq_header;
 35	struct ivpu_cmdq *cmdq;
 36
 37	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
 38	if (!cmdq)
 39		return NULL;
 40
 41	cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
 42	if (!cmdq->mem)
 43		goto cmdq_free;
 44
 45	cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
 46	cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
 47				  sizeof(struct vpu_job_queue_entry));
 48
 49	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
 50	jobq_header = &cmdq->jobq->header;
 51	jobq_header->engine_idx = engine;
 52	jobq_header->head = 0;
 53	jobq_header->tail = 0;
 54	wmb(); /* Flush WC buffer for jobq->header */
 55
 56	return cmdq;
 57
 58cmdq_free:
 59	kfree(cmdq);
 60	return NULL;
 61}
 62
 63static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 64{
 65	if (!cmdq)
 66		return;
 67
 68	ivpu_bo_free_internal(cmdq->mem);
 69	kfree(cmdq);
 70}
 71
 72static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
 73{
 74	struct ivpu_device *vdev = file_priv->vdev;
 75	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
 76	int ret;
 77
 78	lockdep_assert_held(&file_priv->lock);
 79
 80	if (!cmdq) {
 81		cmdq = ivpu_cmdq_alloc(file_priv, engine);
 82		if (!cmdq)
 83			return NULL;
 84		file_priv->cmdq[engine] = cmdq;
 85	}
 86
 87	if (cmdq->db_registered)
 88		return cmdq;
 89
 90	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
 91				   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
 92	if (ret)
 93		return NULL;
 94
 95	cmdq->db_registered = true;
 96
 97	return cmdq;
 98}
 99
100static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
101{
102	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
103
104	lockdep_assert_held(&file_priv->lock);
105
106	if (cmdq) {
107		file_priv->cmdq[engine] = NULL;
108		if (cmdq->db_registered)
109			ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
110
111		ivpu_cmdq_free(file_priv, cmdq);
112	}
113}
114
115void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
116{
117	int i;
118
119	lockdep_assert_held(&file_priv->lock);
120
121	for (i = 0; i < IVPU_NUM_ENGINES; i++)
122		ivpu_cmdq_release_locked(file_priv, i);
123}
124
125/*
126 * Mark the doorbell as unregistered and reset job queue pointers.
127 * This function needs to be called when the VPU hardware is restarted
128 * and FW loses job queue state. The next time job queue is used it
129 * will be registered again.
130 */
131static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
132{
133	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
134
135	lockdep_assert_held(&file_priv->lock);
136
137	if (cmdq) {
138		cmdq->db_registered = false;
139		cmdq->jobq->header.head = 0;
140		cmdq->jobq->header.tail = 0;
141		wmb(); /* Flush WC buffer for jobq header */
142	}
143}
144
145static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
146{
147	int i;
148
149	mutex_lock(&file_priv->lock);
150
151	for (i = 0; i < IVPU_NUM_ENGINES; i++)
152		ivpu_cmdq_reset_locked(file_priv, i);
153
154	mutex_unlock(&file_priv->lock);
155}
156
157void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
158{
159	struct ivpu_file_priv *file_priv;
160	unsigned long ctx_id;
161
162	mutex_lock(&vdev->context_list_lock);
163
164	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
165		ivpu_cmdq_reset_all(file_priv);
166
167	mutex_unlock(&vdev->context_list_lock);
168
169}
170
171static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
172{
173	struct ivpu_device *vdev = job->vdev;
174	struct vpu_job_queue_header *header = &cmdq->jobq->header;
175	struct vpu_job_queue_entry *entry;
176	u32 tail = READ_ONCE(header->tail);
177	u32 next_entry = (tail + 1) % cmdq->entry_count;
178
179	/* Check if there is space left in job queue */
180	if (next_entry == header->head) {
181		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
182			 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
183		return -EBUSY;
184	}
185
186	entry = &cmdq->jobq->job[tail];
187	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
188	entry->job_id = job->job_id;
189	entry->flags = 0;
190	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
191		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
192	wmb(); /* Ensure that tail is updated after filling entry */
193	header->tail = next_entry;
194	wmb(); /* Flush WC buffer for jobq header */
195
196	return 0;
197}
198
199struct ivpu_fence {
200	struct dma_fence base;
201	spinlock_t lock; /* protects base */
202	struct ivpu_device *vdev;
203};
204
205static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
206{
207	return container_of(fence, struct ivpu_fence, base);
208}
209
210static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
211{
212	return DRIVER_NAME;
213}
214
215static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
216{
217	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);
218
219	return dev_name(ivpu_fence->vdev->drm.dev);
220}
221
222static const struct dma_fence_ops ivpu_fence_ops = {
223	.get_driver_name = ivpu_fence_get_driver_name,
224	.get_timeline_name = ivpu_fence_get_timeline_name,
225};
226
227static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
228{
229	struct ivpu_fence *fence;
230
231	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
232	if (!fence)
233		return NULL;
234
235	fence->vdev = vdev;
236	spin_lock_init(&fence->lock);
237	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);
238
239	return &fence->base;
240}
241
242static void ivpu_job_destroy(struct ivpu_job *job)
243{
244	struct ivpu_device *vdev = job->vdev;
245	u32 i;
246
247	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
248		 job->job_id, job->file_priv->ctx.id, job->engine_idx);
249
250	for (i = 0; i < job->bo_count; i++)
251		if (job->bos[i])
252			drm_gem_object_put(&job->bos[i]->base.base);
253
254	dma_fence_put(job->done_fence);
255	ivpu_file_priv_put(&job->file_priv);
256	kfree(job);
257}
258
259static struct ivpu_job *
260ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
261{
262	struct ivpu_device *vdev = file_priv->vdev;
263	struct ivpu_job *job;
264
265	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
266	if (!job)
267		return NULL;
268
269	job->vdev = vdev;
270	job->engine_idx = engine_idx;
271	job->bo_count = bo_count;
272	job->done_fence = ivpu_fence_create(vdev);
273	if (!job->done_fence) {
274		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
275		goto err_free_job;
276	}
277
278	job->file_priv = ivpu_file_priv_get(file_priv);
279
280	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
281	return job;
282
283err_free_job:
284	kfree(job);
285	return NULL;
286}
287
288static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
289{
290	struct ivpu_job *job;
291
292	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
293	if (!job)
294		return -ENOENT;
295
296	if (job->file_priv->has_mmu_faults)
297		job_status = DRM_IVPU_JOB_STATUS_ABORTED;
298
299	job->bos[CMD_BUF_IDX]->job_status = job_status;
300	dma_fence_signal(job->done_fence);
301
302	ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
303		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
304
305	ivpu_job_destroy(job);
306	ivpu_stop_job_timeout_detection(vdev);
307
308	ivpu_rpm_put(vdev);
309	return 0;
310}
311
312void ivpu_jobs_abort_all(struct ivpu_device *vdev)
313{
314	struct ivpu_job *job;
315	unsigned long id;
316
317	xa_for_each(&vdev->submitted_jobs_xa, id, job)
318		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
319}
320
321static int ivpu_job_submit(struct ivpu_job *job)
322{
323	struct ivpu_file_priv *file_priv = job->file_priv;
324	struct ivpu_device *vdev = job->vdev;
325	struct xa_limit job_id_range;
326	struct ivpu_cmdq *cmdq;
327	int ret;
328
329	ret = ivpu_rpm_get(vdev);
330	if (ret < 0)
331		return ret;
332
333	mutex_lock(&file_priv->lock);
334
335	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
336	if (!cmdq) {
337		ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
338				      file_priv->ctx.id, job->engine_idx);
339		ret = -EINVAL;
340		goto err_unlock_file_priv;
341	}
342
343	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
344	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
345
346	xa_lock(&vdev->submitted_jobs_xa);
347	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
348	if (ret) {
349		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
350			 file_priv->ctx.id);
351		ret = -EBUSY;
352		goto err_unlock_submitted_jobs_xa;
353	}
354
355	ret = ivpu_cmdq_push_job(cmdq, job);
356	if (ret)
357		goto err_erase_xa;
358
359	ivpu_start_job_timeout_detection(vdev);
360
361	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
362		cmdq->jobq->header.head = cmdq->jobq->header.tail;
363		wmb(); /* Flush WC buffer for jobq header */
364	} else {
365		ivpu_cmdq_ring_db(vdev, cmdq);
366	}
367
368	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
369		 job->job_id, file_priv->ctx.id, job->engine_idx,
370		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
371
372	xa_unlock(&vdev->submitted_jobs_xa);
373
374	mutex_unlock(&file_priv->lock);
375
376	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
377		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
378
379	return 0;
380
381err_erase_xa:
382	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
383err_unlock_submitted_jobs_xa:
384	xa_unlock(&vdev->submitted_jobs_xa);
385err_unlock_file_priv:
386	mutex_unlock(&file_priv->lock);
387	ivpu_rpm_put(vdev);
388	return ret;
389}
390
391static int
392ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
393				u32 buf_count, u32 commands_offset)
394{
395	struct ivpu_file_priv *file_priv = file->driver_priv;
396	struct ivpu_device *vdev = file_priv->vdev;
397	struct ww_acquire_ctx acquire_ctx;
398	enum dma_resv_usage usage;
399	struct ivpu_bo *bo;
400	int ret;
401	u32 i;
402
403	for (i = 0; i < buf_count; i++) {
404		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
405
406		if (!obj)
407			return -ENOENT;
408
409		job->bos[i] = to_ivpu_bo(obj);
410
411		ret = ivpu_bo_pin(job->bos[i]);
412		if (ret)
413			return ret;
414	}
415
416	bo = job->bos[CMD_BUF_IDX];
417	if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
418		ivpu_warn(vdev, "Buffer is already in use\n");
419		return -EBUSY;
420	}
421
422	if (commands_offset >= ivpu_bo_size(bo)) {
423		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
424		return -EINVAL;
425	}
426
427	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
428
429	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
430					&acquire_ctx);
431	if (ret) {
432		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
433		return ret;
434	}
435
436	for (i = 0; i < buf_count; i++) {
437		ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
438		if (ret) {
439			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
440			goto unlock_reservations;
441		}
442	}
443
444	for (i = 0; i < buf_count; i++) {
445		usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
446		dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
447	}
448
449unlock_reservations:
450	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
451
452	wmb(); /* Flush write combining buffers */
453
454	return ret;
455}
456
457int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
458{
459	struct ivpu_file_priv *file_priv = file->driver_priv;
460	struct ivpu_device *vdev = file_priv->vdev;
461	struct drm_ivpu_submit *params = data;
462	struct ivpu_job *job;
463	u32 *buf_handles;
464	int idx, ret;
465
466	if (params->engine > DRM_IVPU_ENGINE_COPY)
467		return -EINVAL;
468
469	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
470		return -EINVAL;
471
472	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
473		return -EINVAL;
474
475	if (!IS_ALIGNED(params->commands_offset, 8))
476		return -EINVAL;
477
478	if (!file_priv->ctx.id)
479		return -EINVAL;
480
481	if (file_priv->has_mmu_faults)
482		return -EBADFD;
483
484	buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
485	if (!buf_handles)
486		return -ENOMEM;
487
488	ret = copy_from_user(buf_handles,
489			     (void __user *)params->buffers_ptr,
490			     params->buffer_count * sizeof(u32));
491	if (ret) {
492		ret = -EFAULT;
493		goto err_free_handles;
494	}
495
496	if (!drm_dev_enter(&vdev->drm, &idx)) {
497		ret = -ENODEV;
498		goto err_free_handles;
499	}
500
501	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
502		 file_priv->ctx.id, params->buffer_count);
503
504	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
505	if (!job) {
506		ivpu_err(vdev, "Failed to create job\n");
507		ret = -ENOMEM;
508		goto err_exit_dev;
509	}
510
511	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
512					      params->commands_offset);
513	if (ret) {
514		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
515		goto err_destroy_job;
516	}
517
518	down_read(&vdev->pm->reset_lock);
519	ret = ivpu_job_submit(job);
520	up_read(&vdev->pm->reset_lock);
521	if (ret)
522		goto err_signal_fence;
523
524	drm_dev_exit(idx);
525	kfree(buf_handles);
526	return ret;
527
528err_signal_fence:
529	dma_fence_signal(job->done_fence);
530err_destroy_job:
531	ivpu_job_destroy(job);
532err_exit_dev:
533	drm_dev_exit(idx);
534err_free_handles:
535	kfree(buf_handles);
536	return ret;
537}
538
539static void
540ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
541		       struct vpu_jsm_msg *jsm_msg)
542{
543	struct vpu_ipc_msg_payload_job_done *payload;
544	int ret;
545
546	if (!jsm_msg) {
547		ivpu_err(vdev, "IPC message has no JSM payload\n");
548		return;
549	}
550
551	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
552		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
553		return;
554	}
555
556	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
557	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
558	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
559		ivpu_start_job_timeout_detection(vdev);
560}
561
562void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
563{
564	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
565			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
566}
567
568void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
569{
570	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
571}
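
(Note, not part of the file above.) In this v6.8 version each job id packs the submitting context into its upper bits: JOB_ID_JOB_MASK is GENMASK(7, 0), JOB_ID_CONTEXT_MASK is GENMASK(31, 8), and ivpu_job_submit() builds a per-context xa_limit from them, so at most 256 jobs can be in flight per context. A minimal standalone sketch of that arithmetic, using plain C stand-ins for GENMASK()/FIELD_PREP() and a hypothetical context id:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as JOB_ID_JOB_MASK (bits 7:0) and JOB_ID_CONTEXT_MASK (bits 31:8). */
#define JOB_ID_JOB_MASK     0x000000ffu
#define JOB_ID_CONTEXT_MASK 0xffffff00u

int main(void)
{
	uint32_t ctx_id = 5;			/* hypothetical file_priv->ctx.id */
	uint32_t min = (ctx_id - 1) << 8;	/* FIELD_PREP(JOB_ID_CONTEXT_MASK, ctx_id - 1) */
	uint32_t max = min | JOB_ID_JOB_MASK;	/* job_id_range.max */

	/* xa_alloc() in ivpu_job_submit() picks job ids from this window. */
	printf("ctx %u -> job id range 0x%08x..0x%08x\n", ctx_id, min, max);
	return 0;
}

The v6.13.7 version below drops these masks and instead allocates job ids cyclically from a per-context file_priv->job_limit via __xa_alloc_cyclic().
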
drivers/accel/ivpu/ivpu_job.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2020-2024 Intel Corporation
  4 */
  5
  6#include <drm/drm_file.h>
  7
  8#include <linux/bitfield.h>
  9#include <linux/highmem.h>
 10#include <linux/pci.h>
 11#include <linux/module.h>
 12#include <uapi/drm/ivpu_accel.h>
 13
 14#include "ivpu_drv.h"
 15#include "ivpu_fw.h"
 16#include "ivpu_hw.h"
 17#include "ivpu_ipc.h"
 18#include "ivpu_job.h"
 19#include "ivpu_jsm_msg.h"
 20#include "ivpu_pm.h"
 21#include "ivpu_trace.h"
 22#include "vpu_boot_api.h"
 23
 24#define CMD_BUF_IDX	     0
 25#define JOB_MAX_BUFFER_COUNT 65535
 26
 27static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
 28{
 29	ivpu_hw_db_set(vdev, cmdq->db_id);
 30}
 31
 32static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
 33					  struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 34{
 35	u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
 36	u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
 37
 38	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
 39	    ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
 40		return 0;
 41
 42	cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
 43						   primary_size, DRM_IVPU_BO_WC);
 44	if (!cmdq->primary_preempt_buf) {
 45		ivpu_err(vdev, "Failed to create primary preemption buffer\n");
 46		return -ENOMEM;
 47	}
 48
 49	cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
 50						     secondary_size, DRM_IVPU_BO_WC);
 51	if (!cmdq->secondary_preempt_buf) {
 52		ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
 53		goto err_free_primary;
 54	}
 55
 56	return 0;
 57
 58err_free_primary:
 59	ivpu_bo_free(cmdq->primary_preempt_buf);
 60	cmdq->primary_preempt_buf = NULL;
 61	return -ENOMEM;
 62}
 63
 64static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
 65					 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 66{
 67	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
 68		return;
 69
 70	if (cmdq->primary_preempt_buf)
 71		ivpu_bo_free(cmdq->primary_preempt_buf);
 72	if (cmdq->secondary_preempt_buf)
 73		ivpu_bo_free(cmdq->secondary_preempt_buf);
 74}
 75
 76static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 77{
 78	struct ivpu_device *vdev = file_priv->vdev;
 79	struct ivpu_cmdq *cmdq;
 80	int ret;
 81
 82	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
 83	if (!cmdq)
 84		return NULL;
 85
 86	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
 87			      GFP_KERNEL);
 88	if (ret < 0) {
 89		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
 90		goto err_free_cmdq;
 91	}
 92
 93	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
 94			      &file_priv->cmdq_id_next, GFP_KERNEL);
 95	if (ret < 0) {
 96		ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
 97		goto err_erase_db_xa;
 98	}
 99
100	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
101	if (!cmdq->mem)
102		goto err_erase_cmdq_xa;
103
104	ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
105	if (ret)
106		ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
107
108	return cmdq;
109
110err_erase_cmdq_xa:
111	xa_erase(&file_priv->cmdq_xa, cmdq->id);
112err_erase_db_xa:
113	xa_erase(&vdev->db_xa, cmdq->db_id);
114err_free_cmdq:
115	kfree(cmdq);
116	return NULL;
117}
118
119static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
120{
121	if (!cmdq)
122		return;
123
124	ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
125	ivpu_bo_free(cmdq->mem);
126	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
127	kfree(cmdq);
128}
129
130static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
131			      u8 priority)
132{
133	struct ivpu_device *vdev = file_priv->vdev;
134	int ret;
135
136	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
137				       task_pid_nr(current), engine,
138				       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
139	if (ret)
140		return ret;
141
142	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
143							priority);
144	if (ret)
145		return ret;
146
147	return 0;
148}
149
150static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
151{
152	struct ivpu_device *vdev = file_priv->vdev;
153	int ret;
154
155	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
156		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
157					       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
158	else
159		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
160					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
161
162	if (!ret)
163		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
164			 cmdq->db_id, cmdq->id, file_priv->ctx.id);
165
166	return ret;
167}
168
169static int
170ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
171{
172	struct ivpu_device *vdev = file_priv->vdev;
173	struct vpu_job_queue_header *jobq_header;
174	int ret;
175
176	lockdep_assert_held(&file_priv->lock);
177
178	if (cmdq->db_registered)
179		return 0;
180
181	cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
182				  sizeof(struct vpu_job_queue_entry));
183
184	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
185	jobq_header = &cmdq->jobq->header;
186	jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
187	jobq_header->head = 0;
188	jobq_header->tail = 0;
189	if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
190		ivpu_dbg(vdev, JOB, "Turbo mode enabled");
191		jobq_header->flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
192	}
193
194	wmb(); /* Flush WC buffer for jobq->header */
195
196	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
197		ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
198		if (ret)
199			return ret;
200	}
201
202	ret = ivpu_register_db(file_priv, cmdq);
203	if (ret)
204		return ret;
205
206	cmdq->db_registered = true;
207
208	return 0;
209}
210
211static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
212{
213	struct ivpu_device *vdev = file_priv->vdev;
214	int ret;
215
216	lockdep_assert_held(&file_priv->lock);
217
218	if (!cmdq->db_registered)
219		return 0;
220
221	cmdq->db_registered = false;
222
223	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
224		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
225		if (!ret)
226			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
227	}
228
229	ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
230	if (!ret)
231		ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
232
233	return 0;
234}
235
236static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
237{
238	struct ivpu_cmdq *cmdq;
239	unsigned long cmdq_id;
240	int ret;
241
242	lockdep_assert_held(&file_priv->lock);
243
244	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
245		if (cmdq->priority == priority)
246			break;
247
248	if (!cmdq) {
249		cmdq = ivpu_cmdq_alloc(file_priv);
250		if (!cmdq)
251			return NULL;
252		cmdq->priority = priority;
253	}
254
255	ret = ivpu_cmdq_init(file_priv, cmdq, priority);
256	if (ret)
257		return NULL;
258
259	return cmdq;
260}
261
262void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
263{
264	struct ivpu_cmdq *cmdq;
265	unsigned long cmdq_id;
266
267	lockdep_assert_held(&file_priv->lock);
268
269	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
270		xa_erase(&file_priv->cmdq_xa, cmdq_id);
271		ivpu_cmdq_fini(file_priv, cmdq);
272		ivpu_cmdq_free(file_priv, cmdq);
273	}
274}
275
276/*
277 * Mark the doorbell as unregistered
278 * This function needs to be called when the VPU hardware is restarted
279 * and FW loses job queue state. The next time job queue is used it
280 * will be registered again.
281 */
282static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
283{
284	struct ivpu_cmdq *cmdq;
285	unsigned long cmdq_id;
286
287	mutex_lock(&file_priv->lock);
288
289	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
290		cmdq->db_registered = false;
291
292	mutex_unlock(&file_priv->lock);
293}
294
295void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
296{
297	struct ivpu_file_priv *file_priv;
298	unsigned long ctx_id;
299
300	mutex_lock(&vdev->context_list_lock);
301
302	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
303		ivpu_cmdq_reset(file_priv);
304
305	mutex_unlock(&vdev->context_list_lock);
306}
307
308static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
309{
310	struct ivpu_cmdq *cmdq;
311	unsigned long cmdq_id;
312
313	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
314		ivpu_cmdq_fini(file_priv, cmdq);
315}
316
317void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
318{
319	struct ivpu_device *vdev = file_priv->vdev;
320
321	lockdep_assert_held(&file_priv->lock);
322
323	ivpu_cmdq_fini_all(file_priv);
324
325	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
326		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
327}
328
329static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
330{
331	struct ivpu_device *vdev = job->vdev;
332	struct vpu_job_queue_header *header = &cmdq->jobq->header;
333	struct vpu_job_queue_entry *entry;
334	u32 tail = READ_ONCE(header->tail);
335	u32 next_entry = (tail + 1) % cmdq->entry_count;
336
337	/* Check if there is space left in job queue */
338	if (next_entry == header->head) {
339		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
340			 job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
341		return -EBUSY;
342	}
343
344	entry = &cmdq->jobq->slot[tail].job;
345	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
346	entry->job_id = job->job_id;
347	entry->flags = 0;
348	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
349		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
350
351	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
352		if (cmdq->primary_preempt_buf) {
353			entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
354			entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
355		}
356
357		if (cmdq->secondary_preempt_buf) {
358			entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
359			entry->secondary_preempt_buf_size =
360				ivpu_bo_size(cmdq->secondary_preempt_buf);
361		}
362	}
363
364	wmb(); /* Ensure that tail is updated after filling entry */
365	header->tail = next_entry;
366	wmb(); /* Flush WC buffer for jobq header */
367
368	return 0;
369}
370
371struct ivpu_fence {
372	struct dma_fence base;
373	spinlock_t lock; /* protects base */
374	struct ivpu_device *vdev;
375};
376
377static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
378{
379	return container_of(fence, struct ivpu_fence, base);
380}
381
382static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
383{
384	return DRIVER_NAME;
385}
386
387static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
388{
389	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);
390
391	return dev_name(ivpu_fence->vdev->drm.dev);
392}
393
394static const struct dma_fence_ops ivpu_fence_ops = {
395	.get_driver_name = ivpu_fence_get_driver_name,
396	.get_timeline_name = ivpu_fence_get_timeline_name,
397};
398
399static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
400{
401	struct ivpu_fence *fence;
402
403	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
404	if (!fence)
405		return NULL;
406
407	fence->vdev = vdev;
408	spin_lock_init(&fence->lock);
409	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);
410
411	return &fence->base;
412}
413
414static void ivpu_job_destroy(struct ivpu_job *job)
415{
416	struct ivpu_device *vdev = job->vdev;
417	u32 i;
418
419	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
420		 job->job_id, job->file_priv->ctx.id, job->engine_idx);
421
422	for (i = 0; i < job->bo_count; i++)
423		if (job->bos[i])
424			drm_gem_object_put(&job->bos[i]->base.base);
425
426	dma_fence_put(job->done_fence);
427	ivpu_file_priv_put(&job->file_priv);
428	kfree(job);
429}
430
431static struct ivpu_job *
432ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
433{
434	struct ivpu_device *vdev = file_priv->vdev;
435	struct ivpu_job *job;
436
437	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
438	if (!job)
439		return NULL;
440
441	job->vdev = vdev;
442	job->engine_idx = engine_idx;
443	job->bo_count = bo_count;
444	job->done_fence = ivpu_fence_create(vdev);
445	if (!job->done_fence) {
446		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
447		goto err_free_job;
448	}
449
450	job->file_priv = ivpu_file_priv_get(file_priv);
451
452	trace_job("create", job);
453	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
454	return job;
455
456err_free_job:
457	kfree(job);
458	return NULL;
459}
460
461static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
462{
463	struct ivpu_job *job;
464
465	xa_lock(&vdev->submitted_jobs_xa);
466	job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
467
468	if (xa_empty(&vdev->submitted_jobs_xa) && job) {
469		vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
470					    vdev->busy_time);
471	}
472
473	xa_unlock(&vdev->submitted_jobs_xa);
474
475	return job;
476}
477
478static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
479{
480	struct ivpu_job *job;
481
482	job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
483	if (!job)
484		return -ENOENT;
485
486	if (job->file_priv->has_mmu_faults)
487		job_status = DRM_IVPU_JOB_STATUS_ABORTED;
488
489	job->bos[CMD_BUF_IDX]->job_status = job_status;
490	dma_fence_signal(job->done_fence);
491
492	trace_job("done", job);
493	ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
494		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
495
496	ivpu_job_destroy(job);
497	ivpu_stop_job_timeout_detection(vdev);
498
499	ivpu_rpm_put(vdev);
500	return 0;
501}
502
503void ivpu_jobs_abort_all(struct ivpu_device *vdev)
504{
505	struct ivpu_job *job;
506	unsigned long id;
507
508	xa_for_each(&vdev->submitted_jobs_xa, id, job)
509		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
510}
511
512static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
513{
514	struct ivpu_file_priv *file_priv = job->file_priv;
515	struct ivpu_device *vdev = job->vdev;
516	struct ivpu_cmdq *cmdq;
517	bool is_first_job;
518	int ret;
519
520	ret = ivpu_rpm_get(vdev);
521	if (ret < 0)
522		return ret;
523
524	mutex_lock(&file_priv->lock);
525
526	cmdq = ivpu_cmdq_acquire(file_priv, priority);
527	if (!cmdq) {
528		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
529				      file_priv->ctx.id, job->engine_idx, priority);
530		ret = -EINVAL;
531		goto err_unlock_file_priv;
532	}
533
534	xa_lock(&vdev->submitted_jobs_xa);
535	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
536	ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
537				&file_priv->job_id_next, GFP_KERNEL);
538	if (ret < 0) {
539		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
540			 file_priv->ctx.id);
541		ret = -EBUSY;
542		goto err_unlock_submitted_jobs_xa;
543	}
544
545	ret = ivpu_cmdq_push_job(cmdq, job);
546	if (ret)
547		goto err_erase_xa;
548
549	ivpu_start_job_timeout_detection(vdev);
550
551	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
552		cmdq->jobq->header.head = cmdq->jobq->header.tail;
553		wmb(); /* Flush WC buffer for jobq header */
554	} else {
555		ivpu_cmdq_ring_db(vdev, cmdq);
556		if (is_first_job)
557			vdev->busy_start_ts = ktime_get();
558	}
559
560	trace_job("submit", job);
561	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
562		 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
563		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
564
565	xa_unlock(&vdev->submitted_jobs_xa);
566
567	mutex_unlock(&file_priv->lock);
568
569	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
570		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
571
572	return 0;
573
574err_erase_xa:
575	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
576err_unlock_submitted_jobs_xa:
577	xa_unlock(&vdev->submitted_jobs_xa);
578err_unlock_file_priv:
579	mutex_unlock(&file_priv->lock);
580	ivpu_rpm_put(vdev);
581	return ret;
582}
583
584static int
585ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
586				u32 buf_count, u32 commands_offset)
587{
588	struct ivpu_file_priv *file_priv = file->driver_priv;
589	struct ivpu_device *vdev = file_priv->vdev;
590	struct ww_acquire_ctx acquire_ctx;
591	enum dma_resv_usage usage;
592	struct ivpu_bo *bo;
593	int ret;
594	u32 i;
595
596	for (i = 0; i < buf_count; i++) {
597		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
598
599		if (!obj)
600			return -ENOENT;
601
602		job->bos[i] = to_ivpu_bo(obj);
603
604		ret = ivpu_bo_pin(job->bos[i]);
605		if (ret)
606			return ret;
607	}
608
609	bo = job->bos[CMD_BUF_IDX];
610	if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
611		ivpu_warn(vdev, "Buffer is already in use\n");
612		return -EBUSY;
613	}
614
615	if (commands_offset >= ivpu_bo_size(bo)) {
616		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
617		return -EINVAL;
618	}
619
620	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
621
622	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
623					&acquire_ctx);
624	if (ret) {
625		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
626		return ret;
627	}
628
629	for (i = 0; i < buf_count; i++) {
630		ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
631		if (ret) {
632			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
633			goto unlock_reservations;
634		}
635	}
636
637	for (i = 0; i < buf_count; i++) {
638		usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
639		dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
640	}
641
642unlock_reservations:
643	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
644
645	wmb(); /* Flush write combining buffers */
646
647	return ret;
648}
649
650static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
651{
652	if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
653		return DRM_IVPU_JOB_PRIORITY_NORMAL;
654
655	return priority - 1;
656}
657
658int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
659{
660	struct ivpu_file_priv *file_priv = file->driver_priv;
661	struct ivpu_device *vdev = file_priv->vdev;
662	struct drm_ivpu_submit *params = data;
663	struct ivpu_job *job;
664	u32 *buf_handles;
665	int idx, ret;
666	u8 priority;
667
668	if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
669		return -EINVAL;
670
671	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
672		return -EINVAL;
673
674	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
675		return -EINVAL;
676
677	if (!IS_ALIGNED(params->commands_offset, 8))
678		return -EINVAL;
679
680	if (!file_priv->ctx.id)
681		return -EINVAL;
682
683	if (file_priv->has_mmu_faults)
684		return -EBADFD;
685
686	buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
687	if (!buf_handles)
688		return -ENOMEM;
689
690	ret = copy_from_user(buf_handles,
691			     (void __user *)params->buffers_ptr,
692			     params->buffer_count * sizeof(u32));
693	if (ret) {
694		ret = -EFAULT;
695		goto err_free_handles;
696	}
697
698	if (!drm_dev_enter(&vdev->drm, &idx)) {
699		ret = -ENODEV;
700		goto err_free_handles;
701	}
702
703	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
704		 file_priv->ctx.id, params->buffer_count);
705
706	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
707	if (!job) {
708		ivpu_err(vdev, "Failed to create job\n");
709		ret = -ENOMEM;
710		goto err_exit_dev;
711	}
712
713	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
714					      params->commands_offset);
715	if (ret) {
716		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
717		goto err_destroy_job;
718	}
719
720	priority = ivpu_job_to_hws_priority(file_priv, params->priority);
721
722	down_read(&vdev->pm->reset_lock);
723	ret = ivpu_job_submit(job, priority);
724	up_read(&vdev->pm->reset_lock);
725	if (ret)
726		goto err_signal_fence;
727
728	drm_dev_exit(idx);
729	kfree(buf_handles);
730	return ret;
731
732err_signal_fence:
733	dma_fence_signal(job->done_fence);
734err_destroy_job:
735	ivpu_job_destroy(job);
736err_exit_dev:
737	drm_dev_exit(idx);
738err_free_handles:
739	kfree(buf_handles);
740	return ret;
741}
742
743static void
744ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
745		       struct vpu_jsm_msg *jsm_msg)
746{
747	struct vpu_ipc_msg_payload_job_done *payload;
748	int ret;
749
750	if (!jsm_msg) {
751		ivpu_err(vdev, "IPC message has no JSM payload\n");
752		return;
753	}
754
755	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
756		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
757		return;
758	}
759
760	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
761	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
762	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
763		ivpu_start_job_timeout_detection(vdev);
764}
765
766void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
767{
768	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
769			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
770}
771
772void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
773{
774	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
775}
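
As a usage note (not from the kernel tree): userspace reaches ivpu_submit_ioctl() through the accel device node with a struct drm_ivpu_submit. The sketch below mirrors the fields the handler reads (buffers_ptr, buffer_count, engine, commands_offset, priority); treat the exact uapi layout, the DRM_IOCTL_IVPU_SUBMIT macro and the device path as assumptions to verify against uapi/drm/ivpu_accel.h on the target kernel.

/* Hypothetical userspace sketch; fd is an open accel device node (e.g. /dev/accel/accel0)
 * and cmd_bo_handle is a GEM handle whose buffer already holds the commands
 * (BO creation and mapping are handled outside ivpu_job.c). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

#include <drm/ivpu_accel.h>	/* struct drm_ivpu_submit, DRM_IOCTL_IVPU_SUBMIT */

static int submit_one_job(int fd, uint32_t cmd_bo_handle)
{
	uint32_t handles[1] = { cmd_bo_handle };	/* index 0 is CMD_BUF_IDX */
	struct drm_ivpu_submit args;

	memset(&args, 0, sizeof(args));
	args.buffers_ptr = (uintptr_t)handles;
	args.buffer_count = 1;				/* 1..JOB_MAX_BUFFER_COUNT */
	args.engine = DRM_IVPU_ENGINE_COMPUTE;		/* only engine accepted by the v6.13.7 handler */
	args.commands_offset = 0;			/* must be 8-byte aligned */
	args.priority = DRM_IVPU_JOB_PRIORITY_DEFAULT;

	if (ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args)) {
		perror("DRM_IOCTL_IVPU_SUBMIT");
		return -1;
	}
	return 0;
}

On completion, ivpu_job_signal_and_destroy() stores the job status on the command buffer BO (bos[CMD_BUF_IDX]) and signals the done fence; userspace typically waits on that buffer through the driver's BO wait path, which lives outside this file.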