// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

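/*
 * The core clock is programmed through the OPP framework
 * (dev_pm_opp_set_rate()) rather than clk_set_rate() directly, so that
 * any regulator or bandwidth requirements attached to the OPP table are
 * applied along with the frequency change.
 */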
static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}

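/*
 * Report per-file GPU usage through the standard DRM fdinfo key/value
 * pairs ("drm-engine-", "drm-cycles-", "drm-maxfreq-"), so that generic
 * userspace tools can attribute GPU time to individual clients.
 */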
void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
		struct drm_printer *p)
{
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

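/*
 * (Re)initialize the hardware with the GPU IRQ masked, so that the
 * interrupt handler cannot run against a partially initialized GPU.
 */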
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct drm_gem_object *obj, u64 iova, bool full)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Always record size/iova/name; the contents only when 'full': */
	state_bo->size = obj->size;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));

	memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));

	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		msm_gem_lock(obj);
		ptr = msm_gem_get_vaddr_active(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->size);
		msm_gem_put_vaddr(obj);
	}
out:
	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	state->fault_info = gpu->fault_info;

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_bos; i++) {
			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
						  submit->bos[i].iova,
						  should_dump(submit, i));
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_file_private *ctx = submit->queue->ctx;
	struct task_struct *task;

	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}

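/*
 * Recovery sequence: dump the hung submit, capture the crash state,
 * advance the fences past the offending submit, reset the GPU, and then
 * replay any submits that had not yet completed.
 */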
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);

	/*
	 * If the submit retired while we were waiting for the worker to run,
	 * or waiting to acquire the gpu lock, then nothing more to do.
	 */
	if (!submit)
		goto out_unlock;

	/* Increment the fault counts */
	submit->queue->faults++;
	if (submit->aspace)
		submit->aspace->faults++;

	get_comm_cmdline(submit, &comm, &cmd);

	if (comm && cmd) {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
			gpu->name, comm, cmd);

		msm_rd_dump_submit(priv->hangrd, submit,
			"offending task: %s (%s)", comm, cmd);
	} else {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);

		msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		gpu->funcs->recover(gpu);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit);
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	pm_runtime_put(&gpu->pdev->dev);

out_unlock:
	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}

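/*
 * Runs after an iova fault: capture crash state for the first fault of
 * a submit, then resume SMMU translation so the GPU can make progress
 * again.
 */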
static void fault_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

	mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

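/*
 * If the target can report fine-grained forward progress (for example
 * the CP advancing through the ring), allow it a bounded number of
 * extra hangcheck periods before declaring a hang.
 */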
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}

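/*
 * Timer callback, so the heavy lifting (recovery, retiring) is punted
 * to the kthread worker rather than done here in atomic context.
 */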
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

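/* Sample the hw counters, returning the deltas since the previous sample. */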
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

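/*
 * The always-on counter ticks at 19.2 MHz, i.e. one tick every
 * 10000/192 ns, hence the "* 10000 / 192" conversion below. With
 * elapsed in ns, "cycles * 1000 / elapsed" yields the average core
 * clock in MHz.
 */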
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}
	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
					struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done.  If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);
	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;

	return gpu->funcs->irq(gpu);
}

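/*
 * Grab the whole clk bulk, and keep direct handles to the "core" and
 * "rbbmtimer" clocks, which are manipulated individually elsewhere.
 */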
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
	struct msm_gem_address_space *aspace = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_address_space) {
		aspace = gpu->funcs->create_private_address_space(gpu);
		if (!IS_ERR(aspace))
			aspace->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(aspace))
		aspace = msm_gem_address_space_get(gpu->aspace);

	return aspace;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_create_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);
	kthread_init_work(&gpu->fault_work, fault_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

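	/*
	 * A single memptrs BO is shared by all the rings; each ring gets its
	 * own struct msm_rbmemptrs slice within it (see the loop below).
	 */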
	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
		msm_gem_address_space_put(gpu->aspace);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}