/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX] = 1,
	[AMDGPU_HW_IP_COMPUTE] = 4,
	[AMDGPU_HW_IP_DMA] = 2,
	[AMDGPU_HW_IP_UVD] = 1,
	[AMDGPU_HW_IP_VCE] = 1,
	[AMDGPU_HW_IP_UVD_ENC] = 1,
	[AMDGPU_HW_IP_VCN_DEC] = 1,
	[AMDGPU_HW_IP_VCN_ENC] = 1,
	[AMDGPU_HW_IP_VCN_JPEG] = 1,
};

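/**
 * amdgpu_ctx_priority_is_valid - check a userspace context priority value
 * @ctx_prio: priority value from the AMDGPU_CTX ioctl
 *
 * Returns true if @ctx_prio is one of the priority levels defined in the UAPI.
 */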
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

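/**
 * amdgpu_ctx_to_drm_sched_prio - map a context priority to a scheduler priority
 * @ctx_prio: AMDGPU_CTX_PRIORITY_* value
 *
 * Maps the AMDGPU_CTX_PRIORITY_* levels onto the smaller set of DRM GPU
 * scheduler priorities.
 */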
static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		return DRM_SCHED_PRIORITY_UNSET;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we sanitized the userspace-provided
	 * priority already, WARN if this happens.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}

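/**
 * amdgpu_ctx_priority_permit - check whether the caller may use a priority
 * @filp: DRM file private of the caller
 * @priority: requested AMDGPU_CTX_PRIORITY_* value
 *
 * Priorities above NORMAL require CAP_SYS_NICE or DRM master status.
 */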
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

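/**
 * amdgpu_ctx_get_hw_prio - compute the hardware priority for a context
 * @ctx: context the entity belongs to
 * @hw_ip: hardware IP type the entity runs on
 *
 * Translates the effective context priority (override or initial) into a
 * hardware ring/pipe priority, falling back to the default priority when no
 * scheduler is available for the requested level.
 */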
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	int32_t ctx_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence;

	if (!fence)
		return ns_to_ktime(0);

	/* When the fence is not even scheduled, it can't have spent any time */
	s_fence = to_drm_sched_fence(fence);
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return ns_to_ktime(0);

	/* When it is still running, account for the time spent so far */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}

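/**
 * amdgpu_ctx_entity_time - sum up the hardware time used by an entity
 * @ctx: context owning the entity
 * @centity: context entity whose fence ring is accounted
 *
 * Walks the entity's fence ring under the ring lock and adds up the time
 * each remembered fence spent on the hardware.
 */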
static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
				      struct amdgpu_ctx_entity *centity)
{
	ktime_t res = ns_to_ktime(0);
	uint32_t i;

	spin_lock(&ctx->ring_lock);
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
	}
	spin_unlock(&ctx->ring_lock);
	return res;
}

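/**
 * amdgpu_ctx_init_entity - lazily create the scheduler entity for one ring
 * @ctx: context to add the entity to
 * @hw_ip: hardware IP type of the entity
 * @ring: ring index within the IP type
 *
 * Allocates the amdgpu_ctx_entity, picks the scheduler(s) that match the
 * effective context priority and installs the entity in the context. Load
 * balancing is disabled for engines that keep state between dependent jobs.
 */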
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	enum drm_sched_priority drm_prio;
	unsigned int hw_prio, num_scheds;
	int32_t ctx_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->hw_ip = hw_ip;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balancing if the hw engine retains context among
	 * dependent jobs
	 */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

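/**
 * amdgpu_ctx_fini_entity - tear down a context entity
 * @entity: entity to free, may be NULL
 *
 * Drops the remembered fences and frees the entity. Returns the total time
 * the entity's fences spent on the hardware so the caller can account it.
 */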
static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	ktime_t res = ns_to_ktime(0);
	int i;

	if (!entity)
		return res;

	for (i = 0; i < amdgpu_sched_jobs; ++i) {
		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
		dma_fence_put(entity->fences[i]);
	}

	kfree(entity);
	return res;
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

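/**
 * amdgpu_ctx_init - initialize a freshly allocated context
 * @mgr: context manager of the DRM file
 * @priority: requested AMDGPU_CTX_PRIORITY_* value
 * @filp: DRM file private used for the permission check
 * @ctx: context to initialize
 *
 * Checks the priority permission, resets the context state and snapshots the
 * GPU reset and VRAM lost counters as well as the stable pstate to restore on
 * destruction.
 */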
static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	u32 current_stable_pstate;
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r)
		return r;

	if (mgr->adev->pm.stable_pstate_ctx)
		ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
	else
		ctx->stable_pstate = current_stable_pstate;

	return 0;
}

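/**
 * amdgpu_ctx_set_stable_pstate - force a stable power state for a context
 * @ctx: context requesting the pstate
 * @stable_pstate: AMDGPU_CTX_STABLE_PSTATE_* value to apply
 *
 * Only one context may own the stable pstate at a time; other contexts get
 * -EBUSY. Selecting AMDGPU_CTX_STABLE_PSTATE_NONE releases the ownership
 * again by switching the DPM level back to auto.
 */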
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level level;
	u32 current_stable_pstate;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r || (stable_pstate == current_stable_pstate))
		goto done;

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
	struct amdgpu_device *adev = mgr->adev;
	unsigned i, j, idx;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			ktime_t spend;

			spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
		}
	}

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
		drm_dev_exit(idx);
	}

	kfree(ctx);
}

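/**
 * amdgpu_ctx_get_entity - look up (and lazily create) a scheduler entity
 * @ctx: context to search in
 * @hw_ip: hardware IP type
 * @instance: IP instance, currently must be 0
 * @ring: ring index within the IP type
 * @entity: returned drm_sched_entity pointer
 *
 * Validates the addressing and creates the entity on first use.
 */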
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

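/**
 * amdgpu_ctx_add_fence - remember a fence in the entity's fence ring
 * @ctx: context owning the entity
 * @entity: scheduler entity the fence was submitted on
 * @fence: hardware fence of the submission
 *
 * Stores the fence in the next sequence slot, accounts the time of the fence
 * it replaces, and returns the sequence number of the new submission.
 */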
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
		     &ctx->mgr->time_spend[centity->hw_ip]);

	dma_fence_put(other);
	return seq;
}

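/**
 * amdgpu_ctx_get_fence - look up a previously added fence by sequence number
 * @ctx: context owning the entity
 * @entity: scheduler entity to search
 * @seq: sequence number, or ~0ull for the most recent submission
 *
 * Returns a reference to the fence, NULL if it already left the ring buffer,
 * or an ERR_PTR for a sequence number from the future.
 */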
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

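/**
 * amdgpu_ctx_priority_override - apply a new priority to all entities
 * @ctx: context to update
 * @priority: override priority, or AMDGPU_CTX_PRIORITY_UNSET to fall back to
 *            the priority the context was created with
 *
 * Updates the software and, where applicable, the hardware priority of every
 * entity that has already been created for the context.
 */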
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

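/**
 * amdgpu_ctx_wait_prev_fence - throttle submissions on an entity
 * @ctx: context owning the entity
 * @entity: scheduler entity to wait on
 *
 * Waits for the oldest fence still remembered in the entity's fence ring,
 * which bounds the number of submissions that can be in flight per entity.
 */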
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	unsigned int i;

	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init_base(&mgr->ctx_handles, 1);

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		atomic64_set(&mgr->time_spend[i], 0);
}

long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

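/**
 * amdgpu_ctx_mgr_usage - accumulate per-IP hardware usage of a client
 * @mgr: context manager of the DRM file
 * @usage: array filled with the accumulated time per hardware IP
 *
 * Combines the time already accounted in the manager with the time of the
 * fences still remembered by the live entities.
 */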
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
			  ktime_t usage[AMDGPU_HW_IP_NUM])
{
	struct amdgpu_ctx *ctx;
	unsigned int hw_ip, i;
	uint32_t id;

	/*
	 * This is a little bit racy because it can happen that a ctx or a
	 * fence is destroyed just in the moment we try to account them. But
	 * that is ok since exactly that case is explicitly allowed by the
	 * interface.
	 */
	mutex_lock(&mgr->lock);
	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
		uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);

		usage[hw_ip] = ns_to_ktime(ns);
	}

	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
		for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
			for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
				struct amdgpu_ctx_entity *centity;
				ktime_t spend;

				centity = ctx->entities[hw_ip][i];
				if (!centity)
					continue;
				spend = amdgpu_ctx_entity_time(ctx, centity);
				usage[hw_ip] = ktime_add(usage[hw_ip], spend);
			}
		}
	}
	mutex_unlock(&mgr->lock);
}