drivers/gpu/drm/etnaviv/etnaviv_sched.c
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

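/*
 * Module parameters: with 0444 permissions these are read-only at runtime,
 * but can be set at module load time, e.g. "etnaviv.job_hang_limit=1" on the
 * kernel command line.
 * job_hang_limit - how often a job may trigger a timeout before it is
 *                  marked guilty (hang limit argument of drm_sched_init()).
 * hw_job_limit   - how many jobs may be queued to the hardware at once
 *                  (hw submission limit argument of drm_sched_init()).
 */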
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

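/*
 * Run a job: called by the DRM scheduler once all dependencies of the job
 * have signalled. The returned hardware fence is what the scheduler waits
 * on to consider the job finished.
 */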
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr, primid = 0;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
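	/*
	 * On the 3D pipe the FE DMA address may legitimately stay put while a
	 * single large primitive is processed, so the current primitive ID is
	 * sampled from the perf counters as an additional progress signal.
	 */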
	if (submit->exec_state == ETNA_PIPE_3D) {
		/* guard against concurrent usage from perfmon_sample */
		mutex_lock(&gpu->lock);
		gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
			  VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM <<
			  VIVS_MC_PROFILE_CONFIG0_FE__SHIFT);
		primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
		mutex_unlock(&gpu->lock);
	}
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16 ||
	     (submit->exec_state == ETNA_PIPE_3D &&
	      gpu->hangcheck_primid != primid))) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_primid = primid;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, 0);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
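	/*
	 * The scheduler took the job off its pending list before calling this
	 * handler; since the timeout turned out to be spurious, put it back.
	 */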
	list_add(&sched_job->list, &sched_job->sched->pending_list);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
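	/* allocate a user-visible fence id that userspace can wait on */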
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}

int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

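	/*
	 * 500ms per-job timeout; hw job and hang limits come from the module
	 * parameters above.
	 */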
	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}
v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

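	/*
	 * In this version the scheduler is stopped before any checks are
	 * done, so both the recovery path and the spurious-timeout path
	 * below must restart it.
	 */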
	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16)) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}

int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}