v5.9
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>

#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
	switch (prio) {
	case DRM_SCHED_PRIORITY_HIGH_HW:
	case DRM_SCHED_PRIORITY_KERNEL:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
						 enum drm_sched_priority prio,
						 u32 hw_ip)
{
	unsigned int hw_prio;

	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
			AMDGPU_RING_PRIO_DEFAULT;
	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				   const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	unsigned int hw_prio;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
				ctx->init_priority : ctx->override_priority;
	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the UE (uncorrectable error) count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counter is monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the CE (correctable error) count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

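For reference, the ioctl above is reached from userspace through DRM_IOCTL_AMDGPU_CTX with the union drm_amdgpu_ctx defined in include/uapi/drm/amdgpu_drm.h. A minimal, hypothetical userspace sketch (assuming libdrm's drmCommandWriteRead() and a render node at /dev/dri/renderD128; error handling abbreviated):

/* Hypothetical userspace sketch, not part of the kernel file. */
#include <fcntl.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <amdgpu_drm.h>		/* union drm_amdgpu_ctx, AMDGPU_CTX_OP_* */

int example_alloc_and_free_ctx(void)
{
	union drm_amdgpu_ctx args;
	__u32 ctx_id;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed node */

	if (fd < 0)
		return -1;

	/* AMDGPU_CTX_OP_ALLOC_CTX: the kernel fills out.alloc.ctx_id */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
	if (drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args)))
		return -1;
	ctx_id = args.out.alloc.ctx_id;

	/* AMDGPU_CTX_OP_FREE_CTX: release the context again */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = ctx_id;
	return drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}
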
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

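The fence bookkeeping above is a fixed-size ring buffer: fences[] holds the last amdgpu_sched_jobs fences per entity, indexed with seq & (amdgpu_sched_jobs - 1), which is why amdgpu_sched_jobs must be a power of two. amdgpu_ctx_get_fence() then distinguishes three cases: a sequence number not handed out yet (-EINVAL), one already overwritten (NULL, i.e. long signaled), and one still inside the window. A small standalone sketch of the same window logic (illustration only; JOBS stands in for amdgpu_sched_jobs):

/* Illustration only, not kernel code. */
#include <stdint.h>

#define JOBS 32	/* stands in for amdgpu_sched_jobs, must be a power of two */

struct ring {
	uint64_t sequence;	/* next sequence number to hand out */
	const void *slot[JOBS];	/* stands in for the dma_fence pointers */
};

static uint64_t ring_add(struct ring *r, const void *fence)
{
	uint64_t seq = r->sequence++;

	r->slot[seq & (JOBS - 1)] = fence;	/* replaces entry seq - JOBS */
	return seq;
}

static const void *ring_lookup(struct ring *r, uint64_t seq)
{
	if (seq >= r->sequence)
		return NULL;	/* not submitted yet: -EINVAL in the driver */
	if (seq + JOBS < r->sequence)
		return NULL;	/* overwritten: treated as long signaled */
	return r->slot[seq & (JOBS - 1)];
}
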
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					    struct amdgpu_ctx_entity *aentity,
					    int hw_ip,
					    enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity, priority);

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
						      AMDGPU_HW_IP_COMPUTE);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

v6.2
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		return DRM_SCHED_PRIORITY_UNSET;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we sanitized the userspace-provided
	 * priority already; WARN if it does.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}

static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

114{
115	switch (prio) {
116	case AMDGPU_CTX_PRIORITY_HIGH:
117	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
118		return AMDGPU_GFX_PIPE_PRIO_HIGH;
119	default:
120		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
121	}
122}
123
124static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
 
 
125{
126	switch (prio) {
127	case AMDGPU_CTX_PRIORITY_HIGH:
128		return AMDGPU_RING_PRIO_1;
129	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
130		return AMDGPU_RING_PRIO_2;
131	default:
132		return AMDGPU_RING_PRIO_0;
133	}
134}
135
136static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
137{
138	struct amdgpu_device *adev = ctx->mgr->adev;
139	unsigned int hw_prio;
140	int32_t ctx_prio;
141
142	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
143			ctx->init_priority : ctx->override_priority;
144
145	switch (hw_ip) {
146	case AMDGPU_HW_IP_GFX:
147	case AMDGPU_HW_IP_COMPUTE:
148		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
149		break;
150	case AMDGPU_HW_IP_VCE:
151	case AMDGPU_HW_IP_VCN_ENC:
152		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
153		break;
154	default:
155		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
156		break;
157	}
158
 
 
 
159	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
160	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
161		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
162
163	return hw_prio;
164}
165
/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence;

	if (!fence)
		return ns_to_ktime(0);

	/* When the fence is not even scheduled it can't have spent time */
	s_fence = to_drm_sched_fence(fence);
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return ns_to_ktime(0);

	/* When it is still running, account how much it has already spent */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}

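The function above distinguishes three fence states via the scheduled/finished timestamps. A standalone illustration of the same accounting with plain nanosecond values (not kernel code; fake_fence and its _valid flags stand in for the DMA_FENCE_FLAG_TIMESTAMP_BIT tests):

/* Illustration only, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

struct fake_fence {
	bool scheduled_valid, finished_valid;
	uint64_t scheduled_ns, finished_ns;
};

static uint64_t fence_time_ns(const struct fake_fence *f, uint64_t now_ns)
{
	if (!f || !f->scheduled_valid)
		return 0;				/* never started: no time spent */
	if (!f->finished_valid)
		return now_ns - f->scheduled_ns;	/* still running */
	return f->finished_ns - f->scheduled_ns;	/* completed */
}
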
static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
				      struct amdgpu_ctx_entity *centity)
{
	ktime_t res = ns_to_ktime(0);
	uint32_t i;

	spin_lock(&ctx->ring_lock);
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
	}
	spin_unlock(&ctx->ring_lock);
	return res;
}

static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	enum drm_sched_priority drm_prio;
	unsigned int hw_prio, num_scheds;
	int32_t ctx_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->hw_ip = hw_ip;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balancing if the hw engine retains context among
	 * dependent jobs
	 */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	ktime_t res = ns_to_ktime(0);
	int i;

	if (!entity)
		return res;

	for (i = 0; i < amdgpu_sched_jobs; ++i) {
		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
		dma_fence_put(entity->fences[i]);
	}

	kfree(entity);
	return res;
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	u32 current_stable_pstate;
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r)
		return r;

	if (mgr->adev->pm.stable_pstate_ctx)
		ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
	else
		ctx->stable_pstate = current_stable_pstate;

	return 0;
}

static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level level;
	u32 current_stable_pstate;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r || (stable_pstate == current_stable_pstate))
		goto done;

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
	struct amdgpu_device *adev = mgr->adev;
	unsigned i, j, idx;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			ktime_t spend;

			spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
		}
	}

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
		drm_dev_exit(idx);
	}

	kfree(ctx);
}

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

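The two stable-pstate ops are new in this version. A hypothetical userspace sketch of pinning a context to the peak profile (assuming libdrm and an already-allocated context id; note that the requested pstate travels in args.in.flags):

/* Hypothetical userspace sketch, not part of the kernel file. */
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <amdgpu_drm.h>		/* union drm_amdgpu_ctx, AMDGPU_CTX_* */

int example_set_peak_pstate(int fd, __u32 ctx_id)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
	args.in.ctx_id = ctx_id;
	/* the requested pstate is passed in the flags field */
	args.in.flags = AMDGPU_CTX_STABLE_PSTATE_PEAK;
	return drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}
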
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
		     &ctx->mgr->time_spend[centity->hw_ip]);

	dma_fence_put(other);
	return seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	unsigned int i;

	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init_base(&mgr->ctx_handles, 1);

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		atomic64_set(&mgr->time_spend[i], 0);
}

long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
			  ktime_t usage[AMDGPU_HW_IP_NUM])
{
	struct amdgpu_ctx *ctx;
	unsigned int hw_ip, i;
	uint32_t id;

	/*
	 * This is a little bit racy because a ctx or a fence can be destroyed
	 * just in the moment we try to account them. But that is ok since
	 * exactly that case is explicitly allowed by the interface.
	 */
	mutex_lock(&mgr->lock);
	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
		uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);

		usage[hw_ip] = ns_to_ktime(ns);
	}

	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
		for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
			for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
				struct amdgpu_ctx_entity *centity;
				ktime_t spend;

				centity = ctx->entities[hw_ip][i];
				if (!centity)
					continue;
				spend = amdgpu_ctx_entity_time(ctx, centity);
				usage[hw_ip] = ktime_add(usage[hw_ip], spend);
			}
		}
	}
	mutex_unlock(&mgr->lock);
}

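amdgpu_ctx_mgr_usage() sums the retired time from mgr->time_spend[] plus the still-live fences of every context. A hedged sketch of how a consumer such as the fdinfo code might turn those totals into per-engine lines; the ip_name[] table and the seq_file plumbing here are assumptions for illustration, not this file's API:

/* Hedged sketch of a possible consumer, not part of this file. */
static void example_report_usage(struct seq_file *m,
				 struct amdgpu_ctx_mgr *mgr)
{
	static const char * const ip_name[AMDGPU_HW_IP_NUM] = {
		[AMDGPU_HW_IP_GFX]	= "gfx",	/* assumed names */
		[AMDGPU_HW_IP_COMPUTE]	= "compute",
		[AMDGPU_HW_IP_DMA]	= "dma",
	};
	ktime_t usage[AMDGPU_HW_IP_NUM];
	unsigned int hw_ip;

	amdgpu_ctx_mgr_usage(mgr, usage);
	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
		if (!ip_name[hw_ip] || !ktime_to_ns(usage[hw_ip]))
			continue;
		seq_printf(m, "drm-engine-%s:\t%lld ns\n", ip_name[hw_ip],
			   ktime_to_ns(usage[hw_ip]));
	}
}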