/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}

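/* The GET_HANG_STATE ioctl is a two-call interface: a first call with
 * too small a bo_count just reports the required array size, and a
 * second call with a big enough array consumes the saved hang state.
 * A rough userspace sketch (hypothetical variable names, error
 * handling omitted):
 *
 *	struct drm_vc4_get_hang_state get = { .bo_count = 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *	bos = calloc(get.bo_count, sizeof(struct drm_vc4_get_hang_state_bo));
 *	get.bo = (uintptr_t)bos;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 */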
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

147
148static void
149vc4_save_hang_state(struct drm_device *dev)
150{
151 struct vc4_dev *vc4 = to_vc4_dev(dev);
152 struct drm_vc4_get_hang_state *state;
153 struct vc4_hang_state *kernel_state;
154 struct vc4_exec_info *exec[2];
155 struct vc4_bo *bo;
156 unsigned long irqflags;
157 unsigned int i, j, k, unref_list_count;
158
159 kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
160 if (!kernel_state)
161 return;
162
163 state = &kernel_state->user_state;
164
165 spin_lock_irqsave(&vc4->job_lock, irqflags);
166 exec[0] = vc4_first_bin_job(vc4);
167 exec[1] = vc4_first_render_job(vc4);
168 if (!exec[0] && !exec[1]) {
169 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
170 return;
171 }
172
173 /* Get the bos from both binner and renderer into hang state. */
174 state->bo_count = 0;
175 for (i = 0; i < 2; i++) {
176 if (!exec[i])
177 continue;
178
179 unref_list_count = 0;
180 list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
181 unref_list_count++;
182 state->bo_count += exec[i]->bo_count + unref_list_count;
183 }
184
185 kernel_state->bo = kcalloc(state->bo_count,
186 sizeof(*kernel_state->bo), GFP_ATOMIC);
187
188 if (!kernel_state->bo) {
189 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
190 return;
191 }
192
193 k = 0;
194 for (i = 0; i < 2; i++) {
195 if (!exec[i])
196 continue;
197
198 for (j = 0; j < exec[i]->bo_count; j++) {
199 bo = to_vc4_bo(&exec[i]->bo[j]->base);
200
201 /* Retain BOs just in case they were marked purgeable.
202 * This prevents the BO from being purged before
203 * someone had a chance to dump the hang state.
204 */
205 WARN_ON(!refcount_read(&bo->usecnt));
206 refcount_inc(&bo->usecnt);
207 drm_gem_object_get(&exec[i]->bo[j]->base);
208 kernel_state->bo[k++] = &exec[i]->bo[j]->base;
209 }
210
211 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
212 /* No need to retain BOs coming from the ->unref_list
213 * because they are naturally unpurgeable.
214 */
215 drm_gem_object_get(&bo->base.base);
216 kernel_state->bo[k++] = &bo->base.base;
217 }
218 }
219
220 WARN_ON_ONCE(k != state->bo_count);
221
222 if (exec[0])
223 state->start_bin = exec[0]->ct0ca;
224 if (exec[1])
225 state->start_render = exec[1]->ct1ca;
226
227 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
228
229 state->ct0ca = V3D_READ(V3D_CTNCA(0));
230 state->ct0ea = V3D_READ(V3D_CTNEA(0));
231
232 state->ct1ca = V3D_READ(V3D_CTNCA(1));
233 state->ct1ea = V3D_READ(V3D_CTNEA(1));
234
235 state->ct0cs = V3D_READ(V3D_CTNCS(0));
236 state->ct1cs = V3D_READ(V3D_CTNCS(1));
237
238 state->ct0ra0 = V3D_READ(V3D_CT00RA0);
239 state->ct1ra0 = V3D_READ(V3D_CT01RA0);
240
241 state->bpca = V3D_READ(V3D_BPCA);
242 state->bpcs = V3D_READ(V3D_BPCS);
243 state->bpoa = V3D_READ(V3D_BPOA);
244 state->bpos = V3D_READ(V3D_BPOS);
245
246 state->vpmbase = V3D_READ(V3D_VPMBASE);
247
248 state->dbge = V3D_READ(V3D_DBGE);
249 state->fdbgo = V3D_READ(V3D_FDBGO);
250 state->fdbgb = V3D_READ(V3D_FDBGB);
251 state->fdbgr = V3D_READ(V3D_FDBGR);
252 state->fdbgs = V3D_READ(V3D_FDBGS);
253 state->errstat = V3D_READ(V3D_ERRSTAT);
254
255 /* We need to turn purgeable BOs into unpurgeable ones so that
256 * userspace has a chance to dump the hang state before the kernel
257 * decides to purge those BOs.
258 * Note that BO consistency at dump time cannot be guaranteed. For
259 * example, if the owner of these BOs decides to re-use them or mark
260 * them purgeable again there's nothing we can do to prevent it.
261 */
262 for (i = 0; i < kernel_state->user_state.bo_count; i++) {
263 struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
264
265 if (bo->madv == __VC4_MADV_NOTSUPP)
266 continue;
267
268 mutex_lock(&bo->madv_lock);
269 if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
270 bo->madv = VC4_MADV_WILLNEED;
271 refcount_dec(&bo->usecnt);
272 mutex_unlock(&bo->madv_lock);
273 }
274
275 spin_lock_irqsave(&vc4->job_lock, irqflags);
276 if (vc4->hang_state) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
278 vc4_free_hang_state(dev, kernel_state);
279 } else {
280 vc4->hang_state = kernel_state;
281 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
282 }
283}
284
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset. This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

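/* Blocks until the V3D engine has retired @seqno, the timeout expires,
 * or (when @interruptible) a signal is pending. A @timeout_ns of ~0ull
 * means "wait forever", while 0 fails immediately with -ETIME unless
 * the seqno has already been reached. Returns 0 on completion, -ETIME
 * on timeout, or -ERESTARTSYS when interrupted by a signal.
 */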
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

427
428static void
429vc4_flush_caches(struct drm_device *dev)
430{
431 struct vc4_dev *vc4 = to_vc4_dev(dev);
432
433 /* Flush the GPU L2 caches. These caches sit on top of system
434 * L3 (the 128kb or so shared with the CPU), and are
435 * non-allocating in the L3.
436 */
437 V3D_WRITE(V3D_L2CACTL,
438 V3D_L2CACTL_L2CCLR);
439
440 V3D_WRITE(V3D_SLCACTL,
441 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
442 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
443 VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
444 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
445}
446
447static void
448vc4_flush_texture_caches(struct drm_device *dev)
449{
450 struct vc4_dev *vc4 = to_vc4_dev(dev);
451
452 V3D_WRITE(V3D_L2CACTL,
453 V3D_L2CACTL_L2CCLR);
454
455 V3D_WRITE(V3D_SLCACTL,
456 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
457 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
458}
459
/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed. Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

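/* Stamps every BO referenced by the job with the job's seqno, and
 * attaches the job's fence to the BOs' reservation objects: a shared
 * (read) fence on everything the job references, plus an exclusive
 * (write) fence on the BOs the RCL writes to, so that later users of
 * those BOs wait for our writes to land.
 */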
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = &exec->bo[i]->base;

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list). They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
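/* If any lock attempt below returns -EDEADLK, the ww_mutex machinery
 * has decided that another context with an older reservation ticket
 * should win the contended lock. We then drop every lock we hold,
 * sleep on the contended one with the slow-path acquire, and retry the
 * whole set, which guarantees forward progress when concurrent submits
 * reference overlapping BO lists.
 */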
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = &exec->bo[contended_lock]->base;
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = &exec->bo[i]->base;

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = &exec->bo[j]->base;
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = &exec->bo[contended_lock]->base;

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = &exec->bo[i]->base;

		ret = dma_resv_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

654
655/* Queues a struct vc4_exec_info for execution. If no job is
656 * currently executing, then submits it.
657 *
658 * Unlike most GPUs, our hardware only handles one command list at a
659 * time. To queue multiple jobs at once, we'd need to edit the
660 * previous command list to have a jump to the new one at the end, and
661 * then bump the end address. That's a change for a later date,
662 * though.
663 */
664static int
665vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
666 struct ww_acquire_ctx *acquire_ctx,
667 struct drm_syncobj *out_sync)
668{
669 struct vc4_dev *vc4 = to_vc4_dev(dev);
670 struct vc4_exec_info *renderjob;
671 uint64_t seqno;
672 unsigned long irqflags;
673 struct vc4_fence *fence;
674
675 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
676 if (!fence)
677 return -ENOMEM;
678 fence->dev = dev;
679
680 spin_lock_irqsave(&vc4->job_lock, irqflags);
681
682 seqno = ++vc4->emit_seqno;
683 exec->seqno = seqno;
684
685 dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
686 vc4->dma_fence_context, exec->seqno);
687 fence->seqno = exec->seqno;
688 exec->fence = &fence->base;
689
690 if (out_sync)
691 drm_syncobj_replace_fence(out_sync, exec->fence);
692
693 vc4_update_bo_seqnos(exec, seqno);
694
695 vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
696
697 list_add_tail(&exec->head, &vc4->bin_job_list);
698
699 /* If no bin job was executing and if the render job (if any) has the
700 * same perfmon as our job attached to it (or if both jobs don't have
701 * perfmon activated), then kick ours off. Otherwise, it'll get
702 * started when the previous job's flush/render done interrupt occurs.
703 */
704 renderjob = vc4_first_render_job(vc4);
705 if (vc4_first_bin_job(vc4) == exec &&
706 (!renderjob || renderjob->perfmon == exec->perfmon)) {
707 vc4_submit_next_bin_job(dev);
708 vc4_queue_hangcheck(dev);
709 }
710
711 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
712
713 return 0;
714}
715
/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
				       sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr mapping is uncached.
	 */
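	/* Resulting layout of the temporary buffer, using the offsets
	 * computed above:
	 *
	 *   bin_offset (0)    : binner CL copied from userspace
	 *   shader_rec_offset : shader records (16-byte aligned)
	 *   uniforms_offset   : uniform data
	 *   exec_size         : vc4_shader_state[] validation scratch
	 */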
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	if (exec->found_tile_binning_mode_config_packet) {
		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
		if (ret)
			goto fail;
	}

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}

void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

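/* Registers @cb to be run (from the shared workqueue) once the hardware
 * has retired @seqno. If the seqno has already been reached, the
 * callback is scheduled immediately; otherwise it is parked on
 * seqno_cb_list and kicked off by vc4_job_handle_completed().
 */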
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

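/* Helper shared by the WAIT_SEQNO and WAIT_BO ioctls. When the wait is
 * interrupted by a signal, the remaining time is written back through
 * @timeout_ns so that the restarted syscall doesn't start its full
 * timeout over from scratch.
 */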
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

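/* A rough sketch of a userspace submission (illustrative only; the
 * framebuffer description fields (width/height, tile bounds, color/zs
 * surfaces, clear values) are omitted):
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.shader_rec = (uintptr_t)shader_recs,
 *		.shader_rec_size = shader_rec_size,
 *		.shader_rec_count = shader_rec_count,
 *		.uniforms = (uintptr_t)uniforms,
 *		.uniforms_size = uniforms_size,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *
 * On success, submit.seqno holds the job's seqno, which can be passed
 * to the WAIT_SEQNO ioctl.
 */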
/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	ret = vc4_v3d_pm_get(vc4);
	if (ret) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers. Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

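/* The madvise ioctl lets userspace mark a BO as VC4_MADV_DONTNEED
 * (purgeable under memory pressure) or VC4_MADV_WILLNEED (needed
 * again), and reports through @retained whether the backing storage
 * survived. A BO whose storage was purged in the meantime comes back
 * with retained == 0 and must be repopulated before reuse.
 */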
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/device.h>
28#include <linux/io.h>
29
30#include "uapi/drm/vc4_drm.h"
31#include "vc4_drv.h"
32#include "vc4_regs.h"
33#include "vc4_trace.h"
34
35static void
36vc4_queue_hangcheck(struct drm_device *dev)
37{
38 struct vc4_dev *vc4 = to_vc4_dev(dev);
39
40 mod_timer(&vc4->hangcheck.timer,
41 round_jiffies_up(jiffies + msecs_to_jiffies(100)));
42}
43
44struct vc4_hang_state {
45 struct drm_vc4_get_hang_state user_state;
46
47 u32 bo_count;
48 struct drm_gem_object **bo;
49};
50
51static void
52vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
53{
54 unsigned int i;
55
56 for (i = 0; i < state->user_state.bo_count; i++)
57 drm_gem_object_unreference_unlocked(state->bo[i]);
58
59 kfree(state);
60}
61
62int
63vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
64 struct drm_file *file_priv)
65{
66 struct drm_vc4_get_hang_state *get_state = data;
67 struct drm_vc4_get_hang_state_bo *bo_state;
68 struct vc4_hang_state *kernel_state;
69 struct drm_vc4_get_hang_state *state;
70 struct vc4_dev *vc4 = to_vc4_dev(dev);
71 unsigned long irqflags;
72 u32 i;
73 int ret = 0;
74
75 spin_lock_irqsave(&vc4->job_lock, irqflags);
76 kernel_state = vc4->hang_state;
77 if (!kernel_state) {
78 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
79 return -ENOENT;
80 }
81 state = &kernel_state->user_state;
82
83 /* If the user's array isn't big enough, just return the
84 * required array size.
85 */
86 if (get_state->bo_count < state->bo_count) {
87 get_state->bo_count = state->bo_count;
88 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
89 return 0;
90 }
91
92 vc4->hang_state = NULL;
93 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
94
95 /* Save the user's BO pointer, so we don't stomp it with the memcpy. */
96 state->bo = get_state->bo;
97 memcpy(get_state, state, sizeof(*state));
98
99 bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
100 if (!bo_state) {
101 ret = -ENOMEM;
102 goto err_free;
103 }
104
105 for (i = 0; i < state->bo_count; i++) {
106 struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
107 u32 handle;
108
109 ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
110 &handle);
111
112 if (ret) {
113 state->bo_count = i - 1;
114 goto err;
115 }
116 bo_state[i].handle = handle;
117 bo_state[i].paddr = vc4_bo->base.paddr;
118 bo_state[i].size = vc4_bo->base.base.size;
119 }
120
121 if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
122 bo_state,
123 state->bo_count * sizeof(*bo_state)))
124 ret = -EFAULT;
125
126 kfree(bo_state);
127
128err_free:
129
130 vc4_free_hang_state(dev, kernel_state);
131
132err:
133 return ret;
134}
135
136static void
137vc4_save_hang_state(struct drm_device *dev)
138{
139 struct vc4_dev *vc4 = to_vc4_dev(dev);
140 struct drm_vc4_get_hang_state *state;
141 struct vc4_hang_state *kernel_state;
142 struct vc4_exec_info *exec[2];
143 struct vc4_bo *bo;
144 unsigned long irqflags;
145 unsigned int i, j, unref_list_count, prev_idx;
146
147 kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
148 if (!kernel_state)
149 return;
150
151 state = &kernel_state->user_state;
152
153 spin_lock_irqsave(&vc4->job_lock, irqflags);
154 exec[0] = vc4_first_bin_job(vc4);
155 exec[1] = vc4_first_render_job(vc4);
156 if (!exec[0] && !exec[1]) {
157 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
158 return;
159 }
160
161 /* Get the bos from both binner and renderer into hang state. */
162 state->bo_count = 0;
163 for (i = 0; i < 2; i++) {
164 if (!exec[i])
165 continue;
166
167 unref_list_count = 0;
168 list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
169 unref_list_count++;
170 state->bo_count += exec[i]->bo_count + unref_list_count;
171 }
172
173 kernel_state->bo = kcalloc(state->bo_count,
174 sizeof(*kernel_state->bo), GFP_ATOMIC);
175
176 if (!kernel_state->bo) {
177 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
178 return;
179 }
180
181 prev_idx = 0;
182 for (i = 0; i < 2; i++) {
183 if (!exec[i])
184 continue;
185
186 for (j = 0; j < exec[i]->bo_count; j++) {
187 drm_gem_object_reference(&exec[i]->bo[j]->base);
188 kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
189 }
190
191 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
192 drm_gem_object_reference(&bo->base.base);
193 kernel_state->bo[j + prev_idx] = &bo->base.base;
194 j++;
195 }
196 prev_idx = j + 1;
197 }
198
199 if (exec[0])
200 state->start_bin = exec[0]->ct0ca;
201 if (exec[1])
202 state->start_render = exec[1]->ct1ca;
203
204 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
205
206 state->ct0ca = V3D_READ(V3D_CTNCA(0));
207 state->ct0ea = V3D_READ(V3D_CTNEA(0));
208
209 state->ct1ca = V3D_READ(V3D_CTNCA(1));
210 state->ct1ea = V3D_READ(V3D_CTNEA(1));
211
212 state->ct0cs = V3D_READ(V3D_CTNCS(0));
213 state->ct1cs = V3D_READ(V3D_CTNCS(1));
214
215 state->ct0ra0 = V3D_READ(V3D_CT00RA0);
216 state->ct1ra0 = V3D_READ(V3D_CT01RA0);
217
218 state->bpca = V3D_READ(V3D_BPCA);
219 state->bpcs = V3D_READ(V3D_BPCS);
220 state->bpoa = V3D_READ(V3D_BPOA);
221 state->bpos = V3D_READ(V3D_BPOS);
222
223 state->vpmbase = V3D_READ(V3D_VPMBASE);
224
225 state->dbge = V3D_READ(V3D_DBGE);
226 state->fdbgo = V3D_READ(V3D_FDBGO);
227 state->fdbgb = V3D_READ(V3D_FDBGB);
228 state->fdbgr = V3D_READ(V3D_FDBGR);
229 state->fdbgs = V3D_READ(V3D_FDBGS);
230 state->errstat = V3D_READ(V3D_ERRSTAT);
231
232 spin_lock_irqsave(&vc4->job_lock, irqflags);
233 if (vc4->hang_state) {
234 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
235 vc4_free_hang_state(dev, kernel_state);
236 } else {
237 vc4->hang_state = kernel_state;
238 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
239 }
240}
241
242static void
243vc4_reset(struct drm_device *dev)
244{
245 struct vc4_dev *vc4 = to_vc4_dev(dev);
246
247 DRM_INFO("Resetting GPU.\n");
248
249 mutex_lock(&vc4->power_lock);
250 if (vc4->power_refcount) {
251 /* Power the device off and back on the by dropping the
252 * reference on runtime PM.
253 */
254 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
255 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
256 }
257 mutex_unlock(&vc4->power_lock);
258
259 vc4_irq_reset(dev);
260
261 /* Rearm the hangcheck -- another job might have been waiting
262 * for our hung one to get kicked off, and vc4_irq_reset()
263 * would have started it.
264 */
265 vc4_queue_hangcheck(dev);
266}
267
268static void
269vc4_reset_work(struct work_struct *work)
270{
271 struct vc4_dev *vc4 =
272 container_of(work, struct vc4_dev, hangcheck.reset_work);
273
274 vc4_save_hang_state(vc4->dev);
275
276 vc4_reset(vc4->dev);
277}
278
279static void
280vc4_hangcheck_elapsed(unsigned long data)
281{
282 struct drm_device *dev = (struct drm_device *)data;
283 struct vc4_dev *vc4 = to_vc4_dev(dev);
284 uint32_t ct0ca, ct1ca;
285 unsigned long irqflags;
286 struct vc4_exec_info *bin_exec, *render_exec;
287
288 spin_lock_irqsave(&vc4->job_lock, irqflags);
289
290 bin_exec = vc4_first_bin_job(vc4);
291 render_exec = vc4_first_render_job(vc4);
292
293 /* If idle, we can stop watching for hangs. */
294 if (!bin_exec && !render_exec) {
295 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
296 return;
297 }
298
299 ct0ca = V3D_READ(V3D_CTNCA(0));
300 ct1ca = V3D_READ(V3D_CTNCA(1));
301
302 /* If we've made any progress in execution, rearm the timer
303 * and wait.
304 */
305 if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
306 (render_exec && ct1ca != render_exec->last_ct1ca)) {
307 if (bin_exec)
308 bin_exec->last_ct0ca = ct0ca;
309 if (render_exec)
310 render_exec->last_ct1ca = ct1ca;
311 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
312 vc4_queue_hangcheck(dev);
313 return;
314 }
315
316 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
317
318 /* We've gone too long with no progress, reset. This has to
319 * be done from a work struct, since resetting can sleep and
320 * this timer hook isn't allowed to.
321 */
322 schedule_work(&vc4->hangcheck.reset_work);
323}
324
325static void
326submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
327{
328 struct vc4_dev *vc4 = to_vc4_dev(dev);
329
330 /* Set the current and end address of the control list.
331 * Writing the end register is what starts the job.
332 */
333 V3D_WRITE(V3D_CTNCA(thread), start);
334 V3D_WRITE(V3D_CTNEA(thread), end);
335}
336
337int
338vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
339 bool interruptible)
340{
341 struct vc4_dev *vc4 = to_vc4_dev(dev);
342 int ret = 0;
343 unsigned long timeout_expire;
344 DEFINE_WAIT(wait);
345
346 if (vc4->finished_seqno >= seqno)
347 return 0;
348
349 if (timeout_ns == 0)
350 return -ETIME;
351
352 timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
353
354 trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
355 for (;;) {
356 prepare_to_wait(&vc4->job_wait_queue, &wait,
357 interruptible ? TASK_INTERRUPTIBLE :
358 TASK_UNINTERRUPTIBLE);
359
360 if (interruptible && signal_pending(current)) {
361 ret = -ERESTARTSYS;
362 break;
363 }
364
365 if (vc4->finished_seqno >= seqno)
366 break;
367
368 if (timeout_ns != ~0ull) {
369 if (time_after_eq(jiffies, timeout_expire)) {
370 ret = -ETIME;
371 break;
372 }
373 schedule_timeout(timeout_expire - jiffies);
374 } else {
375 schedule();
376 }
377 }
378
379 finish_wait(&vc4->job_wait_queue, &wait);
380 trace_vc4_wait_for_seqno_end(dev, seqno);
381
382 return ret;
383}
384
385static void
386vc4_flush_caches(struct drm_device *dev)
387{
388 struct vc4_dev *vc4 = to_vc4_dev(dev);
389
390 /* Flush the GPU L2 caches. These caches sit on top of system
391 * L3 (the 128kb or so shared with the CPU), and are
392 * non-allocating in the L3.
393 */
394 V3D_WRITE(V3D_L2CACTL,
395 V3D_L2CACTL_L2CCLR);
396
397 V3D_WRITE(V3D_SLCACTL,
398 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
399 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
400 VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
401 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
402}
403
404/* Sets the registers for the next job to be actually be executed in
405 * the hardware.
406 *
407 * The job_lock should be held during this.
408 */
409void
410vc4_submit_next_bin_job(struct drm_device *dev)
411{
412 struct vc4_dev *vc4 = to_vc4_dev(dev);
413 struct vc4_exec_info *exec;
414
415again:
416 exec = vc4_first_bin_job(vc4);
417 if (!exec)
418 return;
419
420 vc4_flush_caches(dev);
421
422 /* Either put the job in the binner if it uses the binner, or
423 * immediately move it to the to-be-rendered queue.
424 */
425 if (exec->ct0ca != exec->ct0ea) {
426 submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
427 } else {
428 vc4_move_job_to_render(dev, exec);
429 goto again;
430 }
431}
432
433void
434vc4_submit_next_render_job(struct drm_device *dev)
435{
436 struct vc4_dev *vc4 = to_vc4_dev(dev);
437 struct vc4_exec_info *exec = vc4_first_render_job(vc4);
438
439 if (!exec)
440 return;
441
442 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
443}
444
445void
446vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
447{
448 struct vc4_dev *vc4 = to_vc4_dev(dev);
449 bool was_empty = list_empty(&vc4->render_job_list);
450
451 list_move_tail(&exec->head, &vc4->render_job_list);
452 if (was_empty)
453 vc4_submit_next_render_job(dev);
454}
455
456static void
457vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
458{
459 struct vc4_bo *bo;
460 unsigned i;
461
462 for (i = 0; i < exec->bo_count; i++) {
463 bo = to_vc4_bo(&exec->bo[i]->base);
464 bo->seqno = seqno;
465 }
466
467 list_for_each_entry(bo, &exec->unref_list, unref_head) {
468 bo->seqno = seqno;
469 }
470
471 for (i = 0; i < exec->rcl_write_bo_count; i++) {
472 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
473 bo->write_seqno = seqno;
474 }
475}
476
477/* Queues a struct vc4_exec_info for execution. If no job is
478 * currently executing, then submits it.
479 *
480 * Unlike most GPUs, our hardware only handles one command list at a
481 * time. To queue multiple jobs at once, we'd need to edit the
482 * previous command list to have a jump to the new one at the end, and
483 * then bump the end address. That's a change for a later date,
484 * though.
485 */
486static void
487vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
488{
489 struct vc4_dev *vc4 = to_vc4_dev(dev);
490 uint64_t seqno;
491 unsigned long irqflags;
492
493 spin_lock_irqsave(&vc4->job_lock, irqflags);
494
495 seqno = ++vc4->emit_seqno;
496 exec->seqno = seqno;
497 vc4_update_bo_seqnos(exec, seqno);
498
499 list_add_tail(&exec->head, &vc4->bin_job_list);
500
501 /* If no job was executing, kick ours off. Otherwise, it'll
502 * get started when the previous job's flush done interrupt
503 * occurs.
504 */
505 if (vc4_first_bin_job(vc4) == exec) {
506 vc4_submit_next_bin_job(dev);
507 vc4_queue_hangcheck(dev);
508 }
509
510 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
511}
512
513/**
514 * Looks up a bunch of GEM handles for BOs and stores the array for
515 * use in the command validator that actually writes relocated
516 * addresses pointing to them.
517 */
518static int
519vc4_cl_lookup_bos(struct drm_device *dev,
520 struct drm_file *file_priv,
521 struct vc4_exec_info *exec)
522{
523 struct drm_vc4_submit_cl *args = exec->args;
524 uint32_t *handles;
525 int ret = 0;
526 int i;
527
528 exec->bo_count = args->bo_handle_count;
529
530 if (!exec->bo_count) {
531 /* See comment on bo_index for why we have to check
532 * this.
533 */
534 DRM_ERROR("Rendering requires BOs to validate\n");
535 return -EINVAL;
536 }
537
538 exec->bo = drm_calloc_large(exec->bo_count,
539 sizeof(struct drm_gem_cma_object *));
540 if (!exec->bo) {
541 DRM_ERROR("Failed to allocate validated BO pointers\n");
542 return -ENOMEM;
543 }
544
545 handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
546 if (!handles) {
547 ret = -ENOMEM;
548 DRM_ERROR("Failed to allocate incoming GEM handles\n");
549 goto fail;
550 }
551
552 if (copy_from_user(handles,
553 (void __user *)(uintptr_t)args->bo_handles,
554 exec->bo_count * sizeof(uint32_t))) {
555 ret = -EFAULT;
556 DRM_ERROR("Failed to copy in GEM handles\n");
557 goto fail;
558 }
559
560 spin_lock(&file_priv->table_lock);
561 for (i = 0; i < exec->bo_count; i++) {
562 struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
563 handles[i]);
564 if (!bo) {
565 DRM_ERROR("Failed to look up GEM BO %d: %d\n",
566 i, handles[i]);
567 ret = -EINVAL;
568 spin_unlock(&file_priv->table_lock);
569 goto fail;
570 }
571 drm_gem_object_reference(bo);
572 exec->bo[i] = (struct drm_gem_cma_object *)bo;
573 }
574 spin_unlock(&file_priv->table_lock);
575
576fail:
577 drm_free_large(handles);
578 return ret;
579}
580
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
				       sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_ERROR("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied-in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because bo->vaddr is
	 * an uncached mapping, and the validator needs to read the
	 * contents back, which would be slow through that mapping.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   (void __user *)(uintptr_t)args->bin_cl,
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   (void __user *)(uintptr_t)args->shader_rec,
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   (void __user *)(uintptr_t)args->uniforms,
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

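/* Frees the job: drops the references on all of the BOs it used,
 * releases our runtime PM reference on V3D when the last job
 * completes, and frees the exec struct itself.
 */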
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned int i;

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++)
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

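/* Frees any jobs on the job_done_list and schedules any seqno
 * callbacks whose seqno has now been reached.  The job_lock is
 * dropped around vc4_complete_exec(), since completing a job may
 * sleep (it takes a mutex and may put our power reference).
 */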
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

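/* Workqueue bottom half for seqno callbacks: calls the registered
 * function with the job_lock no longer held.
 */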
static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

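/* Registers a callback to be fired from a workqueue once @seqno has
 * been reached.  If the seqno has already passed, the work is
 * scheduled immediately.
 */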
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

781/* Scheduled when any job has been completed, this walks the list of
782 * jobs that had completed and unrefs their BOs and frees their exec
783 * structs.
784 */
785static void
786vc4_job_done_work(struct work_struct *work)
787{
788 struct vc4_dev *vc4 =
789 container_of(work, struct vc4_dev, job_done_work);
790
791 vc4_job_handle_completed(vc4);
792}
793
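/* Shared implementation of the seqno and BO wait ioctls.  If the
 * wait is interrupted by a signal, the remaining timeout is written
 * back so that a restarted wait doesn't extend the total time.
 */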
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

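/* ioctl to block, with a timeout, until the given seqno has been
 * reached.
 */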
int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

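/* ioctl to block, with a timeout, until the last job using the given
 * BO has completed.
 */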
int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

848/**
849 * Submits a command list to the VC4.
850 *
851 * This is what is called batchbuffer emitting on other hardware.
852 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kzalloc(sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			/* Drop the reference we just took, so a later
			 * submit doesn't skip powering V3D back up.
			 */
			vc4->power_refcount--;
		}
	}
	mutex_unlock(&vc4->power_lock);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	vc4_queue_submit(dev, exec);

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

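/* Called at driver load to set up the GEM state: the job lists and
 * their lock, the hangcheck timer, and the job-done bottom half.
 */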
void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	setup_timer(&vc4->hangcheck.timer,
		    vc4_hangcheck_elapsed,
		    (unsigned long)dev);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);
}

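/* Called at driver unload to tear down the GEM state, dropping the
 * binner overflow BO and any captured hang state.
 */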
void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->overflow_mem) {
		drm_gem_object_put_unlocked(&vc4->overflow_mem->base.base);
		vc4->overflow_mem = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);

	vc4_bo_cache_destroy(dev);
}