// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_exec.h"

#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_vm.h"

/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit
 *   syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring.
 * Let's take a deep dive into each of these.
 *
 * We can get away without a BO list by forcing the user to use in / out fences
 * on every exec rather than the kernel tracking dependencies of BOs (e.g. if
 * the user knows an exec writes to a BO and reads from that BO in the next
 * exec, it is the user's responsibility to pass an in / out fence between the
 * two execs).
 *
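 * As an illustration only (userspace sketch: syncobj creation and error
 * handling elided; fd, queue_id, syncobj_handle, batch_a and batch_b are
 * placeholders; uAPI field names per xe_drm.h), two dependent execs can be
 * chained through a single syncobj used as exec A's out-fence and then as
 * exec B's in-fence:
 *
 * .. code-block:: c
 *
 *	struct drm_xe_sync sync = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	// out-fence of exec A
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = queue_id,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *		.address = batch_a,
 *		.num_batch_buffer = 1,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);	// exec A signals the syncobj
 *
 *	sync.flags = 0;				// same syncobj, now an in-fence
 *	exec.address = batch_b;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);	// exec B waits on exec A
 *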
 * Implicit dependencies for external BOs are handled by using the dma-buf
 * implicit dependency uAPI (TODO: add link). To make this work, each exec must
 * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
 * BO mapped in the VM.
 *
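 * For example, a consumer of such an external BO could materialize that
 * implicit fence with the generic dma-buf uAPI (sketch, error handling
 * elided; dmabuf_fd is a placeholder):
 *
 * .. code-block:: c
 *
 *	struct dma_buf_export_sync_file args = {
 *		.flags = DMA_BUF_SYNC_READ,	// wait for all writers
 *		.fd = -1,
 *	};
 *
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &args);
 *	// args.fd is now a sync_file that signals once the fence installed
 *	// in the DMA_RESV_USAGE_WRITE slot by the last exec has signaled
 *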
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user's
 * perspective, e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like when syncing
 * between two dependent execs.
 *
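 * For instance (sketch only: bind-op fields elided and names approximate),
 * an exec that depends on an async bind can reuse the bind's out-fence
 * syncobj as its in-fence:
 *
 * .. code-block:: c
 *
 *	struct drm_xe_sync sync = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	// bind's out-fence
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		// map / unmap operation fields elided
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 *	sync.flags = 0;		// same syncobj, now the exec's in-fence
 *	// ... submit the exec with &sync exactly as in the example above
 *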
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise, we also
 * have to rebind BOs that have been evicted by the kernel. We schedule these
 * rebinds behind any pending kernel operations on any external BOs in the VM
 * or any BOs private to the VM. This is accomplished by the rebinds waiting on
 * the BOs' DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on
 * all of the BOs' slots (in-flight execs are in the DMA_RESV_USAGE_BOOKKEEP
 * slot for private BOs and in the DMA_RESV_USAGE_WRITE slot for external BOs).
 *
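 * Expressed with dma-resv primitives, the ordering is roughly the following
 * (sketch only; the real code expresses these as scheduler dependencies, not
 * blocking waits):
 *
 * .. code-block:: c
 *
 *	// a rebind orders itself behind pending kernel ops on a BO
 *	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
 *			      false, MAX_SCHEDULE_TIMEOUT);
 *
 *	// a kernel op (e.g. eviction) orders itself behind every fence,
 *	// DMA_RESV_USAGE_BOOKKEEP being the widest usage class
 *	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_BOOKKEEP,
 *			      false, MAX_SCHEDULE_TIMEOUT);
 *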
 * Rebinds / dma-resv usage applies only to non-compute mode VMs; for compute
 * mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec as we write the ring
 * at submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
 * ring is available.
 *
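 * A sketch of that arithmetic (constant names and values illustrative, not
 * the driver's actual ones):
 *
 * .. code-block:: c
 *
 *	#define MAX_JOB_SIZE_DW		48	// illustrative only
 *	#define MAX_JOB_SIZE_BYTES	(MAX_JOB_SIZE_DW * sizeof(u32))
 *
 *	// the most jobs that can ever sit in the ring at once; handed to
 *	// the DRM scheduler as its job limit at init time
 *	u32 job_limit = ring_size_bytes / MAX_JOB_SIZE_BYTES;
 *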
 * All of this results in a rather simple exec implementation.
 *
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock global VM lock in read mode                                       |
 *	Pin userptrs (also finds userptrs invalidated since last exec)         |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job to VM dma-resv bookkeeping slot (non-compute mode)             |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
 */

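/*
 * drm_gpuvm_exec_lock() callback, invoked with all dma-resv locks held:
 * revalidate any BOs on the VM's eviction list so they are resident again
 * before the job runs.
 */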
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}

int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec *args = data;
	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	struct xe_exec_queue *q;
	struct xe_sync_entry *syncs = NULL;
	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
	struct drm_exec *exec = &vm_exec.exec;
	u32 i, num_syncs = 0, num_ufence = 0;
	struct xe_sched_job *job;
	struct dma_fence *rebind_fence;
	struct xe_vm *vm;
	bool write_locked, skip_retry = false;
	ktime_t end = 0;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	/* Returning here would leak the exec queue reference from lookup */
	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
			 q->width != args->num_batch_buffer)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
		err = -ECANCELED;
		goto err_exec_queue;
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto err_exec_queue;
		}
	}

	vm = q->vm;

	for (i = 0; i < args->num_syncs; i++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
					  &syncs_user[i], SYNC_PARSE_FLAG_EXEC |
					  (xe_vm_in_lr_mode(vm) ?
					   SYNC_PARSE_FLAG_LR_MODE : 0));
		if (err)
			goto err_syncs;

		if (xe_sync_is_ufence(&syncs[i]))
			num_ufence++;
	}

	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
		err = -EINVAL;
		goto err_syncs;
	}

	if (xe_exec_queue_is_parallel(q)) {
		err = copy_from_user(addresses, addresses_user, sizeof(u64) *
				     q->width);
		if (err) {
			err = -EFAULT;
			goto err_syncs;
		}
	}

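	/*
	 * Anything below may fail with -EAGAIN (e.g. on eviction contention);
	 * that path drops every lock in err_unlock_list and restarts here.
	 */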
retry:
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_syncs;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}

	vm_exec.vm = &vm->gpuvm;
	vm_exec.num_fences = 1 + vm->xe->info.tile_count;
	vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
	if (xe_vm_in_lr_mode(vm)) {
		drm_exec_init(exec, vm_exec.flags, 0);
	} else {
		err = drm_gpuvm_exec_lock(&vm_exec);
		if (err) {
			if (xe_vm_validate_should_retry(exec, err, &end))
				err = -EAGAIN;
			goto err_unlock_list;
		}
	}

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
		goto err_exec;
	}

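	/*
	 * Zero batch buffers means this exec is only a synchronization point;
	 * in non-LR mode, build a composite fence from the in-fences and the
	 * queue's last fence, signal the out-syncs with it and record it as
	 * the queue's new last fence.
	 */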
	if (!args->num_batch_buffer) {
		if (!xe_vm_in_lr_mode(vm)) {
			struct dma_fence *fence;

			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
			if (IS_ERR(fence)) {
				err = PTR_ERR(fence);
				goto err_exec;
			}
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], NULL, fence);
			xe_exec_queue_last_fence_set(q, vm, fence);
			dma_fence_put(fence);
		}

		goto err_exec;
	}

	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
		err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */
		skip_retry = true;
		goto err_exec;
	}

	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err_exec;
	}

	/*
	 * Rebind any invalidated userptrs or evicted BOs in the VM; non-compute
	 * VM mode only.
	 */
	rebind_fence = xe_vm_rebind(vm, false);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto err_put_job;
	}

	/*
	 * We store the rebind_fence in the VM so subsequent execs don't get
	 * scheduled before the rebinds of userptrs / evicted BOs are complete.
	 */
	if (rebind_fence) {
		dma_fence_put(vm->rebind_fence);
		vm->rebind_fence = rebind_fence;
	}
	if (vm->rebind_fence) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			     &vm->rebind_fence->flags)) {
			dma_fence_put(vm->rebind_fence);
			vm->rebind_fence = NULL;
		} else {
			dma_fence_get(vm->rebind_fence);
			err = drm_sched_job_add_dependency(&job->drm,
							   vm->rebind_fence);
			if (err)
				goto err_put_job;
		}
	}

	/* Wait behind munmap style rebinds */
	if (!xe_vm_in_lr_mode(vm)) {
		err = drm_sched_job_add_resv_dependencies(&job->drm,
							  xe_vm_resv(vm),
							  DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);
	if (err)
		goto err_put_job;

	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put_job;

		err = down_read_interruptible(&vm->userptr.notifier_lock);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}

	/*
	 * Point of no return: if we error after this point, just set an error
	 * on the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm))
		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);

	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_signal(&syncs[i], job,
				     &job->drm.s_fence->finished);

	if (xe_exec_queue_is_lr(q))
		q->ring_ops->emit_job(job);
	if (!xe_vm_in_lr_mode(vm))
		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
	xe_vm_reactivate_rebind(vm);

	if (!err && !xe_vm_in_lr_mode(vm)) {
		spin_lock(&xe->ttm.lru_lock);
		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
		spin_unlock(&xe->ttm.lru_lock);
	}

err_repin:
	if (!xe_vm_in_lr_mode(vm))
		up_read(&vm->userptr.notifier_lock);
err_put_job:
	if (err)
		xe_sched_job_put(job);
err_exec:
	drm_exec_fini(exec);
err_unlock_list:
	if (write_locked)
		up_write(&vm->lock);
	else
		up_read(&vm->lock);
	if (err == -EAGAIN && !skip_retry)
		goto retry;
err_syncs:
	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_cleanup(&syncs[i]);
	kfree(syncs);
err_exec_queue:
	xe_exec_queue_put(q);

	return err;
}