/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

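/*
 * amdgpu_ctx_init - set up a newly allocated context
 *
 * Allocates the per-context fence slots (amdgpu_sched_jobs entries per
 * ring), points each ring's fence window into that array and creates a
 * scheduler entity on every ring at normal priority.  On failure the
 * entities created so far are torn down again and the fence array is freed.
 */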
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			break;
	}

	if (i < adev->num_rings) {
		for (j = 0; j < i; j++)
			amd_sched_entity_fini(&adev->rings[j]->sched,
					      &ctx->rings[j].entity);
		kfree(ctx->fences);
		return r;
	}
	return 0;
}

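/*
 * amdgpu_ctx_fini - tear down a context
 *
 * Drops the references on all fences still cached in the per-ring
 * windows, frees the fence array and destroys the scheduler entities.
 */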
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);
}

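/*
 * amdgpu_ctx_alloc - create a context on behalf of an open file
 *
 * Allocates a new context, inserts it into the file's IDR under
 * mgr->lock and returns the resulting handle (always >= 1) through @id.
 */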
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

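/*
 * amdgpu_ctx_do_release - kref release callback
 *
 * Runs once the last reference to a context is dropped; tears the
 * context down and frees it.
 */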
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

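/*
 * amdgpu_ctx_free - handle AMDGPU_CTX_OP_FREE_CTX
 *
 * Removes the handle from the IDR and drops the reference it held.  The
 * context itself stays alive until every amdgpu_ctx_get() caller has
 * balanced its reference with amdgpu_ctx_put().
 */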
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

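/*
 * amdgpu_ctx_query - handle AMDGPU_CTX_OP_QUERY_STATE
 *
 * Reports whether a GPU reset happened since the previous query by
 * comparing the context's saved reset counter against the device-global
 * one, then updates the saved value.
 */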
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

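/*
 * amdgpu_ctx_ioctl - entry point for the DRM_IOCTL_AMDGPU_CTX ioctl
 *
 * Dispatches on args->in.op to allocate, free or query a context.
 * Roughly, userspace drives this as in the following sketch (assuming
 * the usual uapi definitions from amdgpu_drm.h and an open device fd):
 *
 *	union drm_amdgpu_ctx args = {};
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
 *	// args.out.alloc.ctx_id now holds the new context handle
 */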
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

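/*
 * amdgpu_ctx_get - look up a context by handle and take a reference
 *
 * Returns the context with an extra reference held, or NULL if the
 * handle is unknown.  The caller must balance this with amdgpu_ctx_put().
 */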
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

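/*
 * amdgpu_ctx_put - drop a reference taken with amdgpu_ctx_get()
 */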
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

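/*
 * amdgpu_ctx_add_fence - remember the fence of a submission
 *
 * Stores @fence in the ring's circular window of amdgpu_sched_jobs
 * slots and returns the sequence number assigned to it.  If the slot is
 * still occupied by a fence amdgpu_sched_jobs submissions old, that
 * fence is waited on first so the window never overruns.
 */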
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

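/*
 * amdgpu_ctx_get_fence - look up the fence of an earlier submission
 *
 * Returns a reference to the fence belonging to @seq, NULL if the
 * submission is so old that its fence already left the window (and is
 * therefore known to have signaled), or an ERR_PTR(-EINVAL) if @seq has
 * not been emitted yet.
 */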
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

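/*
 * amdgpu_ctx_mgr_init - initialize a per-file context manager
 */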
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

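/*
 * amdgpu_ctx_mgr_fini - clean up a per-file context manager
 *
 * Releases every context still registered in the IDR; any context that
 * is still referenced elsewhere at this point is reported as leaked.
 */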
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}