/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/cl826e.h>
#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

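/*
 * Signal a fence and unlink it from its channel's pending list; the
 * caller must hold fctx->lock. A nonzero return means the last waiter
 * that needed the uevent notifier is gone, and the caller must drop the
 * notifier reference via nvif_notify_put().
 */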
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	fence_put(&fence->base);
	return drop;
}

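/*
 * Identify fences that belong to this driver instance: they must use one
 * of our two ops tables, and their context id must fall within the range
 * this device allocated. Anything else is a foreign fence that can only
 * be waited on through the generic fence interface.
 */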
static struct nouveau_fence *
nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm)
{
	struct nouveau_fence_priv *priv = (void *)drm->fence;

	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < priv->context_base ||
	    fence->context >= priv->context_base + priv->contexts)
		return NULL;

	return from_fence(fence);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);

	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

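/*
 * Signal every pending fence whose seqno the channel has already passed,
 * in submission order. The (int)(seq - seqno) comparison is deliberate:
 * like time_after() for jiffies, it remains correct when the 32-bit
 * sequence counter wraps around.
 */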
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

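/*
 * Non-stall interrupt handler: keep the notifier armed (NVIF_NOTIFY_KEEP)
 * while waiters remain, and let it drop (NVIF_NOTIFY_DROP) once
 * nouveau_fence_update() reports that the last uevent waiter went away.
 */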
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = priv->context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}

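/*
 * nouveau_fence_work() arranges for func(data) to run once a fence has
 * signaled. Fence callbacks can fire in interrupt context, so the
 * callback only schedules a workqueue item and func() runs later in
 * process context.
 */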
struct nouveau_fence_work {
	struct work_struct work;
	struct fence_cb cb;
	void (*func)(void *);
	void *data;
};

static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
	work->func(work->data);
	kfree(work);
}

static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
{
	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);

	schedule_work(&work->work);
}

void
nouveau_fence_work(struct fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_fence_work *work;

	if (fence_is_signaled(fence))
		goto err;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/*
		 * this might not be a nouveau fence any more,
		 * so force a lazy wait here
		 */
		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
					   true, false));
		goto err;
	}

	INIT_WORK(&work->work, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;

	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
		goto err_free;
	return;

err_free:
	kfree(work);
err:
	func(data);
}

int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		fence_init(&fence->base, &nouveau_fence_ops_uevent,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	else
		fence_init(&fence->base, &nouveau_fence_ops_legacy,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return fence_is_signaled(&fence->base);
}

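/*
 * Legacy polling wait: sleep with exponential backoff, starting at one
 * microsecond and capped at one millisecond per iteration, until the
 * fence signals, the timeout expires, or a signal interrupts the wait.
 */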
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

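/*
 * Make @chan wait for the fences attached to a buffer object. Exclusive
 * (write) access must wait for the exclusive fence and every shared
 * fence; shared (read) access only needs to order against the exclusive
 * fence. When the other fence also belongs to nouveau, fctx->sync()
 * resolves the dependency with an on-GPU semaphore instead of a CPU wait.
 */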
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		   bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct fence *fence;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(resv);
	fence = reservation_object_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->sysmem = sysmem;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_driver_name(struct fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, the ->read() callback would not assume the channel
 * context is still alive. Because this function may be called on behalf of
 * another device, it can race with channel teardown and read freed memory.
 * The drm node should still be there, so we can derive the index from the
 * fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	fence_free(&fence->base);
}

static const struct fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

static bool nouveau_fence_enable_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = fence_default_wait,
	.release = nouveau_fence_release
};
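
/*
 * What follows is the same file at a later kernel revision, after the
 * tree-wide rename of struct fence to struct dma_fence (Linux 4.10) and
 * the move from reservation_object to dma_resv; the logic is largely
 * unchanged.
 */
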
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>

#include <nvif/cl826e.h>
#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	dma_fence_put(&fence->base);
	return drop;
}

static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < drm->chan.context_base ||
	    fence->context >= drm->chan.context_base + drm->chan.nr)
		return NULL;

	return from_fence(fence);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);

	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->chan.context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}

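/*
 * Emitting a fence takes a reference on fctx->fence_ref, so the fence
 * context stays alive for as long as any fence initialized against its
 * lock exists; the matching kref_put() is in nouveau_fence_release().
 */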
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_dma_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}

static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

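/*
 * For read access, dma_resv_reserve_shared() pre-allocates one slot in
 * the shared-fence array while the reservation is held, so that adding
 * this channel's fence afterwards cannot fail with -ENOMEM.
 */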
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		   bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_fence *fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	struct dma_resv_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = dma_resv_reserve_shared(resv, 1);
		if (ret)
			return ret;
	}

	fobj = dma_resv_get_list(resv);
	fence = dma_resv_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		dma_fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, the ->read() callback would not assume the channel
 * context is still alive. Because this function may be called on behalf of
 * another device, it can race with channel teardown and read freed memory.
 * The drm node should still be there, so we can derive the index from the
 * fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

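/*
 * Legacy enable_signaling hook: there is no interrupt to arm, so simply
 * report whether the fence is still pending. Returning false tells the
 * dma_fence core the fence has already signaled; the list_del() and
 * dma_fence_put() mirror what nouveau_fence_signal() would have done.
 */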
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	dma_fence_free(&fence->base);
}

static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

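/*
 * No .wait hook: dma_fence_ops.wait is optional in current kernels and
 * the core falls back to dma_fence_default_wait(), which is what the
 * older revision above requested explicitly.
 */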
static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};