Linux Audio

Check our new training course

Loading...
v4.6
 
 1/*
 2 * Copyright (C) 2013 Red Hat
 3 * Author: Rob Clark <robdclark@gmail.com>
 4 *
 5 * This program is free software; you can redistribute it and/or modify it
 6 * under the terms of the GNU General Public License version 2 as published by
 7 * the Free Software Foundation.
 8 *
 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program.  If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_ringbuffer.h"
19#include "msm_gpu.h"
20
21struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22{
23	struct msm_ringbuffer *ring;
 
 
24	int ret;
25
26	size = ALIGN(size, 4);   /* size should be dword aligned */
 
27
28	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
29	if (!ring) {
30		ret = -ENOMEM;
31		goto fail;
32	}
33
34	ring->gpu = gpu;
35	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
36	if (IS_ERR(ring->bo)) {
37		ret = PTR_ERR(ring->bo);
38		ring->bo = NULL;
 
 
 
 
 
39		goto fail;
40	}
41
42	ring->start = msm_gem_vaddr_locked(ring->bo);
43	ring->end   = ring->start + (size / 4);
 
 
44	ring->cur   = ring->start;
45
46	ring->size = size;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
48	return ring;
49
50fail:
51	if (ring)
52		msm_ringbuffer_destroy(ring);
53	return ERR_PTR(ret);
54}
55
56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
57{
58	if (ring->bo)
59		drm_gem_object_unreference_unlocked(ring->bo);
 
 
 
 
 
 
 
60	kfree(ring);
61}
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2013 Red Hat
  4 * Author: Rob Clark <robdclark@gmail.com>
 
 
 
 
 
 
 
 
 
 
 
 
  5 */
  6
  7#include "msm_ringbuffer.h"
  8#include "msm_gpu.h"
  9
/* Max number of jobs the scheduler may have in the ringbuffer at once;
 * runtime-tunable via the 0600 module parameter.
 */
static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
 13
/*
 * drm_sched run_job callback: invoked once all of a job's dependencies
 * have signaled.  Initializes the job's hw fence, marks the submit's
 * BOs as no longer pinned, and writes the submit into the ring under
 * the global gpu lock.
 *
 * Returns a new reference to the hw fence; the scheduler uses it to
 * track completion of the job on the GPU.
 */
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);
	struct msm_fence_context *fctx = submit->ring->fctx;
	struct msm_gpu *gpu = submit->gpu;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	int i;

	/* Attach the hw fence to this ring's fence context: */
	msm_fence_init(submit->hw_fence, fctx);

	/* lru.lock guards the pin state updates below — presumably
	 * serializing against the shrinker; NOTE(review): confirm.
	 */
	mutex_lock(&priv->lru.lock);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_active(obj);
	}

	submit->bos_pinned = false;

	mutex_unlock(&priv->lru.lock);

	/* TODO move submit path over to using a per-ring lock.. */
	mutex_lock(&gpu->lock);

	msm_gpu_submit(gpu, submit);

	mutex_unlock(&gpu->lock);

	/* Caller (scheduler) owns the returned fence reference: */
	return dma_fence_get(submit->hw_fence);
}
 45
 46static void msm_job_free(struct drm_sched_job *job)
 47{
 48	struct msm_gem_submit *submit = to_msm_submit(job);
 49
 50	drm_sched_job_cleanup(job);
 51	msm_gem_submit_put(submit);
 52}
 53
/* Scheduler entry points: run jobs on the GPU, free them when retired. */
static const struct drm_sched_backend_ops msm_sched_ops = {
	.run_job = msm_job_run,
	.free_job = msm_job_free
};
 58
 59struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 60		void *memptrs, uint64_t memptrs_iova)
 61{
 62	struct msm_ringbuffer *ring;
 63	long sched_timeout;
 64	char name[32];
 65	int ret;
 66
 67	/* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
 68	BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
 69
 70	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 71	if (!ring) {
 72		ret = -ENOMEM;
 73		goto fail;
 74	}
 75
 76	ring->gpu = gpu;
 77	ring->id = id;
 78
 79	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
 80		check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
 81		gpu->aspace, &ring->bo, &ring->iova);
 82
 83	if (IS_ERR(ring->start)) {
 84		ret = PTR_ERR(ring->start);
 85		ring->start = NULL;
 86		goto fail;
 87	}
 88
 89	msm_gem_object_set_name(ring->bo, "ring%d", id);
 90
 91	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
 92	ring->next  = ring->start;
 93	ring->cur   = ring->start;
 94
 95	ring->memptrs = memptrs;
 96	ring->memptrs_iova = memptrs_iova;
 97
 98	 /* currently managing hangcheck ourselves: */
 99	sched_timeout = MAX_SCHEDULE_TIMEOUT;
100
101	ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
102			     DRM_SCHED_PRIORITY_COUNT,
103			     num_hw_submissions, 0, sched_timeout,
104			     NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
105	if (ret) {
106		goto fail;
107	}
108
109	INIT_LIST_HEAD(&ring->submits);
110	spin_lock_init(&ring->submit_lock);
111	spin_lock_init(&ring->preempt_lock);
112
113	snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
114
115	ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);
116
117	return ring;
118
119fail:
120	msm_ringbuffer_destroy(ring);
 
121	return ERR_PTR(ret);
122}
123
124void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
125{
126	if (IS_ERR_OR_NULL(ring))
127		return;
128
129	drm_sched_fini(&ring->sched);
130
131	msm_fence_context_free(ring->fctx);
132
133	msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
134
135	kfree(ring);
136}