// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_sync.h"

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_sched_job_types.h"

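/**
 * struct xe_user_fence - User fence, signalled by writing a value to a
 * user-space address once the attached dma-fence signals
 * @xe: xe device
 * @refcount: reference count, fence is freed on the final user_fence_put()
 * @cb: dma-fence callback that schedules @worker
 * @worker: work item that writes @value to @addr from process context
 * @mm: mm of the creating process, pinned with mmgrab() at creation
 * @addr: user-space address the fence value is written to
 * @value: value written to @addr on signalling
 * @signalled: set to 1 once the worker has run and waiters have been woken
 */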
struct xe_user_fence {
	struct xe_device *xe;
	struct kref refcount;
	struct dma_fence_cb cb;
	struct work_struct worker;
	struct mm_struct *mm;
	u64 __user *addr;
	u64 value;
	int signalled;
};

static void user_fence_destroy(struct kref *kref)
{
	struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
						    refcount);

	mmdrop(ufence->mm);
	kfree(ufence);
}

static void user_fence_get(struct xe_user_fence *ufence)
{
	kref_get(&ufence->refcount);
}

static void user_fence_put(struct xe_user_fence *ufence)
{
	kref_put(&ufence->refcount, user_fence_destroy);
}

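/*
 * Allocate and initialise a user fence. The get_user() probe faults the
 * address in so that obviously bad addresses fail with -EFAULT at creation
 * time rather than at signal time; the creating process' mm is pinned with
 * mmgrab() so the deferred write in user_fence_worker() can attach to it.
 */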
static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
					       u64 value)
{
	struct xe_user_fence *ufence;
	u64 __user *ptr = u64_to_user_ptr(addr);
	u64 __maybe_unused prefetch_val;

	if (get_user(prefetch_val, ptr))
		return ERR_PTR(-EFAULT);

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (!ufence)
		return ERR_PTR(-ENOMEM);

	ufence->xe = xe;
	kref_init(&ufence->refcount);
	ufence->addr = ptr;
	ufence->value = value;
	ufence->mm = current->mm;
	mmgrab(ufence->mm);

	return ufence;
}

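/*
 * Process-context worker that performs the actual user fence write: it
 * temporarily adopts the creator's mm via kthread_use_mm() to copy the
 * fence value to the user address, then marks the fence signalled and
 * wakes any waiters on the device's ufence_wq.
 */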
static void user_fence_worker(struct work_struct *w)
{
	struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);

	if (mmget_not_zero(ufence->mm)) {
		kthread_use_mm(ufence->mm);
		if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
			XE_WARN_ON("Copy to user failed");
		kthread_unuse_mm(ufence->mm);
		mmput(ufence->mm);
	} else {
		drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n");
	}

	/*
	 * Wake up waiters only after updating the ufence state, allowing the UMD
	 * to safely reuse the same ufence without encountering -EBUSY errors.
	 */
	WRITE_ONCE(ufence->signalled, 1);
	wake_up_all(&ufence->xe->ufence_wq);
	user_fence_put(ufence);
}

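/*
 * Queue the user fence write on the device's ordered workqueue and drop
 * the fence reference taken in xe_sync_entry_signal(). The dma-fence
 * callback below may fire in atomic context, so the copy_to_user() is
 * deferred to the worker rather than done here.
 */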
static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
	INIT_WORK(&ufence->worker, user_fence_worker);
	queue_work(ufence->xe->ordered_wq, &ufence->worker);
	dma_fence_put(fence);
}

static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);

	kick_ufence(ufence, fence);
}

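/**
 * xe_sync_entry_parse() - Parse a user-supplied struct drm_xe_sync
 * @xe: xe device
 * @xef: xe file the sync belongs to
 * @sync: sync entry to fill in
 * @sync_user: user pointer to the struct drm_xe_sync to parse
 * @flags: SYNC_PARSE_FLAG_* controlling which sync types are accepted
 *
 * Copy a struct drm_xe_sync from user space, validate it against @flags,
 * and take the references (syncobj, fence, or user fence) the entry needs.
 * On success the entry must be released with xe_sync_entry_cleanup().
 *
 * Return: 0 on success, negative error code on failure
 */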
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
			struct xe_sync_entry *sync,
			struct drm_xe_sync __user *sync_user,
			unsigned int flags)
{
	struct drm_xe_sync sync_in;
	int err;
	bool exec = flags & SYNC_PARSE_FLAG_EXEC;
	bool in_lr_mode = flags & SYNC_PARSE_FLAG_LR_MODE;
	bool disallow_user_fence = flags & SYNC_PARSE_FLAG_DISALLOW_USER_FENCE;
	bool signal;

	if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, sync_in.flags & ~DRM_XE_SYNC_FLAG_SIGNAL) ||
	    XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
		return -EINVAL;

	signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
	switch (sync_in.type) {
	case DRM_XE_SYNC_TYPE_SYNCOBJ:
		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
			return -EINVAL;

		sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
		if (XE_IOCTL_DBG(xe, !sync->syncobj))
			return -ENOENT;

		if (!signal) {
			sync->fence = drm_syncobj_fence_get(sync->syncobj);
			if (XE_IOCTL_DBG(xe, !sync->fence))
				return -EINVAL;
		}
		break;

	case DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ:
		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
			return -EINVAL;

		if (XE_IOCTL_DBG(xe, sync_in.timeline_value == 0))
			return -EINVAL;

		sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
		if (XE_IOCTL_DBG(xe, !sync->syncobj))
			return -ENOENT;

		if (signal) {
			sync->chain_fence = dma_fence_chain_alloc();
			if (!sync->chain_fence)
				return -ENOMEM;
		} else {
			sync->fence = drm_syncobj_fence_get(sync->syncobj);
			if (XE_IOCTL_DBG(xe, !sync->fence))
				return -EINVAL;

			err = dma_fence_chain_find_seqno(&sync->fence,
							 sync_in.timeline_value);
			if (err)
				return err;
		}
		break;

	case DRM_XE_SYNC_TYPE_USER_FENCE:
		if (XE_IOCTL_DBG(xe, disallow_user_fence))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, !signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, sync_in.addr & 0x7))
			return -EINVAL;

		if (exec) {
			sync->addr = sync_in.addr;
		} else {
			sync->ufence = user_fence_create(xe, sync_in.addr,
							 sync_in.timeline_value);
			if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
				return PTR_ERR(sync->ufence);
		}

		break;

	default:
		return -EINVAL;
	}

	sync->type = sync_in.type;
	sync->flags = sync_in.flags;
	sync->timeline_value = sync_in.timeline_value;

	return 0;
}

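/**
 * xe_sync_entry_add_deps() - Add a sync entry's in-fence as a job dependency
 * @sync: sync entry
 * @job: scheduler job to add the dependency to
 *
 * If the entry carries an in-fence, add a reference to it as a dependency
 * of @job so the job does not run before the fence signals.
 *
 * Return: 0 on success or if there is nothing to add, negative error code
 * on failure
 */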
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
	if (sync->fence)
		return drm_sched_job_add_dependency(&job->drm,
						    dma_fence_get(sync->fence));

	return 0;
}

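/**
 * xe_sync_entry_signal() - Signal a sync entry with a fence
 * @sync: sync entry to signal
 * @fence: fence carrying the result of the operation
 *
 * For timeline syncobjs, add @fence as a new chain point; for binary
 * syncobjs, replace the current fence; for user fences, arrange for the
 * fence value to be written once @fence signals. Entries without
 * DRM_XE_SYNC_FLAG_SIGNAL are ignored.
 */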
void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
	if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
		return;

	if (sync->chain_fence) {
		drm_syncobj_add_point(sync->syncobj, sync->chain_fence,
				      fence, sync->timeline_value);
		/*
		 * The chain's ownership is transferred to the
		 * timeline.
		 */
		sync->chain_fence = NULL;
	} else if (sync->syncobj) {
		drm_syncobj_replace_fence(sync->syncobj, fence);
	} else if (sync->ufence) {
		int err;

		dma_fence_get(fence);
		user_fence_get(sync->ufence);
		err = dma_fence_add_callback(fence, &sync->ufence->cb,
					     user_fence_cb);
		if (err == -ENOENT) {
			kick_ufence(sync->ufence, fence);
		} else if (err) {
			XE_WARN_ON("failed to add user fence");
			user_fence_put(sync->ufence);
			dma_fence_put(fence);
		}
	}
}

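/**
 * xe_sync_entry_cleanup() - Release the references held by a sync entry
 * @sync: sync entry to clean up
 *
 * Drop the syncobj, fence, fence chain, and user fence references taken
 * by xe_sync_entry_parse(), if present.
 */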
void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
{
	if (sync->syncobj)
		drm_syncobj_put(sync->syncobj);
	dma_fence_put(sync->fence);
	dma_fence_chain_free(sync->chain_fence);
	if (sync->ufence)
		user_fence_put(sync->ufence);
}

/**
 * xe_sync_in_fence_get() - Get a fence from syncs, exec queue, and VM
 * @sync: input syncs
 * @num_sync: number of syncs
 * @q: exec queue
 * @vm: VM
 *
 * Get a fence from syncs, exec queue, and VM. If the syncs contain in-fences,
 * create and return a composite fence of all in-fences plus the exec queue's
 * last fence. If there are no in-fences, return the last fence on the input
 * exec queue. The caller must drop the reference to the returned fence.
 *
 * Return: fence on success, ERR_PTR(-ENOMEM) on failure
 */
struct dma_fence *
xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
		     struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	struct dma_fence *fence;
	int i, num_in_fence = 0, current_fence = 0;

	lockdep_assert_held(&vm->lock);

	/* Count in-fences */
	for (i = 0; i < num_sync; ++i) {
		if (sync[i].fence) {
			++num_in_fence;
			fence = sync[i].fence;
		}
	}

	/* Easy case... */
	if (!num_in_fence) {
		fence = xe_exec_queue_last_fence_get(q, vm);
		return fence;
	}

	/* Create composite fence */
	fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_sync; ++i) {
		if (sync[i].fence) {
			dma_fence_get(sync[i].fence);
			fences[current_fence++] = sync[i].fence;
		}
	}
	fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
	/* The array holds the in-fences plus the exec queue's last fence */
	cf = dma_fence_array_create(num_in_fence + 1, fences,
				    vm->composite_fence_ctx,
				    vm->composite_fence_seqno++,
				    false);
	if (!cf) {
		--vm->composite_fence_seqno;
		goto err_out;
	}

	return &cf->base;

err_out:
	while (current_fence)
		dma_fence_put(fences[--current_fence]);
	kfree(fences);
	kfree(cf);

	return ERR_PTR(-ENOMEM);
}

/**
 * __xe_sync_ufence_get() - Get a reference on a user fence
 * @ufence: input user fence
 *
 * Take an additional reference on the user fence.
 *
 * Return: xe_user_fence pointer with reference
 */
struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
{
	user_fence_get(ufence);

	return ufence;
}

/**
 * xe_sync_ufence_get() - Get user fence from sync
 * @sync: input sync
 *
 * Take a reference on the user fence attached to the sync entry.
 *
 * Return: xe_user_fence pointer with reference
 */
struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
{
	user_fence_get(sync->ufence);

	return sync->ufence;
}

/**
 * xe_sync_ufence_put() - Put a user fence reference
 * @ufence: user fence reference
 */
void xe_sync_ufence_put(struct xe_user_fence *ufence)
{
	user_fence_put(ufence);
}

/**
 * xe_sync_ufence_get_status() - Get user fence status
 * @ufence: user fence
 *
 * Return: 1 if signalled, 0 if not signalled, <0 on error
 */
int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
{
	return READ_ONCE(ufence->signalled);
}