/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

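/*
 * One process slice is one 8-byte doorbell per queue for up to 1024
 * queues, rounded up to a whole page.
 */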
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

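/*
 * Allocate a doorbell from the MES doorbell bitmap. SDMA queues start the
 * search at the first SDMA engine's doorbell offset; all other IP types
 * search from the beginning. The returned index is the absolute dword
 * offset in the doorbell BAR (each 64-bit doorbell spans two dwords).
 */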
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  struct amdgpu_mes_process *process,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    struct amdgpu_mes_process *process,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

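/*
 * Set aside one page of doorbells for kernel queues and pre-allocate the
 * first slots as aggregated doorbells, one per MES priority level.
 */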
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, AMDGPU_MES_LOG_BUFFER_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

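/*
 * One-time MES software init: set up the ID allocators and locks, build
 * the HQD masks describing which hardware queue slots MES may use, and
 * allocate writeback slots for the scheduler context, the query-status
 * fence and register reads, plus the doorbells and the optional event log.
 */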
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

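/*
 * Create a MES process for the given PASID: allocate and zero the process
 * context BO consumed by the MES firmware, then publish the process in the
 * pasid IDR under the MES lock.
 */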
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

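/*
 * Tear down a MES process: unmap every queue of every gang from hardware
 * and drop the IDR entries under the MES lock, then free all queue, gang
 * and process memory after the lock is released.
 */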
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

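/*
 * Create a gang (a group of queues scheduled together) under an existing
 * process: allocate its context BO, assign a gang ID from the IDR and link
 * it into the process's gang list.
 */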
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

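/*
 * Create a hardware queue on an existing gang: allocate and initialize the
 * MQD, reserve a queue ID and a doorbell, then pass the full queue
 * description (process/gang context addresses, quanta, priorities, MQD and
 * wptr addresses) to the MES firmware via add_hw_queue.
 */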
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

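/*
 * Remove a hardware queue: drop it from the queue IDR, ask the MES
 * firmware to unmap it, then release its doorbell and free the MQD.
 */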
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

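/*
 * Ask MES to unmap a legacy queue, i.e. a kernel ring that was not created
 * through MES; the trailing fence address/data pair is written on
 * completion so the caller can wait for the unmap.
 */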
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

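/*
 * Register access helpers routed through the MES firmware's misc-op
 * interface. Reads land in the read_val writeback slot; writes and
 * write-then-wait / wait-for-value operations are executed by the
 * firmware on the kernel's behalf.
 */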
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

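/*
 * Configure the shader debugger for a process via a MES misc op. A
 * process_ctx_flush request is rejected here; callers needing a flush must
 * use amdgpu_mes_flush_shader_debugger() instead. trap_en is only honored
 * from MES API version 14 onwards.
 */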
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger() instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

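/*
 * Create a kernel amdgpu_ring backed by a MES hardware queue: the ring
 * lives in the shared context metadata BO, borrows funcs/me/pipe from an
 * existing ring of the same type, and is registered with the firmware
 * through amdgpu_mes_add_hw_queue().
 */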
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

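/*
 * Map the context metadata BO into the VM at the caller-chosen GPU
 * address: lock the BO and the page directory with drm_exec, create and
 * update the mapping, and wait for the page-table updates to land before
 * returning.
 */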
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else {
			DRM_INFO("ring %s ib test passed\n", ring->name);
		}
	}

	return 0;
}

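/*
 * MES self test: create a temporary VM and process, spawn one gang per
 * queue type with a single queue each, run ring and IB tests on every
 * queue, then tear everything down again.
 */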
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

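/*
 * Fetch the MES firmware for the given pipe and record its ucode/data
 * start addresses. GC 11+ uses the split naming scheme (mes_2 for the
 * scheduler, mes1 for the KIQ pipe) with a fallback to the legacy name for
 * the scheduler pipe; with PSP front-door loading the images are also
 * registered in the ucode list.
 */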
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drm_exec.h>
26
27#include "amdgpu_mes.h"
28#include "amdgpu.h"
29#include "soc15_common.h"
30#include "amdgpu_mes_ctx.h"
31
32#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
33#define AMDGPU_ONE_DOORBELL_SIZE 8
34
35int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
36{
37 return roundup(AMDGPU_ONE_DOORBELL_SIZE *
38 AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
39 PAGE_SIZE);
40}
41
42static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
43 struct amdgpu_mes_process *process,
44 int ip_type, uint64_t *doorbell_index)
45{
46 unsigned int offset, found;
47 struct amdgpu_mes *mes = &adev->mes;
48
49 if (ip_type == AMDGPU_RING_TYPE_SDMA)
50 offset = adev->doorbell_index.sdma_engine[0];
51 else
52 offset = 0;
53
54 found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
55 if (found >= mes->num_mes_dbs) {
56 DRM_WARN("No doorbell available\n");
57 return -ENOSPC;
58 }
59
60 set_bit(found, mes->doorbell_bitmap);
61
62 /* Get the absolute doorbell index on BAR */
63 *doorbell_index = mes->db_start_dw_offset + found * 2;
64 return 0;
65}
66
67static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
68 struct amdgpu_mes_process *process,
69 uint32_t doorbell_index)
70{
71 unsigned int old, rel_index;
72 struct amdgpu_mes *mes = &adev->mes;
73
74 /* Find the relative index of the doorbell in this object */
75 rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
76 old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
77 WARN_ON(!old);
78}
79
80static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
81{
82 int i;
83 struct amdgpu_mes *mes = &adev->mes;
84
85 /* Bitmap for dynamic allocation of kernel doorbells */
86 mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
87 if (!mes->doorbell_bitmap) {
88 DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
89 return -ENOMEM;
90 }
91
92 mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
93 for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
94 adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
95 set_bit(i, mes->doorbell_bitmap);
96 }
97
98 return 0;
99}
100
101static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
102{
103 int r;
104
105 r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
106 AMDGPU_GEM_DOMAIN_GTT,
107 &adev->mes.event_log_gpu_obj,
108 &adev->mes.event_log_gpu_addr,
109 &adev->mes.event_log_cpu_addr);
110 if (r) {
111 dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
112 return r;
113 }
114
115 memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
116
117 return 0;
118
119}
120
121static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
122{
123 bitmap_free(adev->mes.doorbell_bitmap);
124}
125
126int amdgpu_mes_init(struct amdgpu_device *adev)
127{
128 int i, r;
129
130 adev->mes.adev = adev;
131
132 idr_init(&adev->mes.pasid_idr);
133 idr_init(&adev->mes.gang_id_idr);
134 idr_init(&adev->mes.queue_id_idr);
135 ida_init(&adev->mes.doorbell_ida);
136 spin_lock_init(&adev->mes.queue_id_lock);
137 spin_lock_init(&adev->mes.ring_lock);
138 mutex_init(&adev->mes.mutex_hidden);
139
140 adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
141 adev->mes.vmid_mask_mmhub = 0xffffff00;
142 adev->mes.vmid_mask_gfxhub = 0xffffff00;
143
144 for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
145 /* use only 1st MEC pipes */
146 if (i >= 4)
147 continue;
148 adev->mes.compute_hqd_mask[i] = 0xc;
149 }
150
151 for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
152 adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
153
154 for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
155 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
156 IP_VERSION(6, 0, 0))
157 adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
158 /* zero sdma_hqd_mask for non-existent engine */
159 else if (adev->sdma.num_instances == 1)
160 adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
161 else
162 adev->mes.sdma_hqd_mask[i] = 0xfc;
163 }
164
165 r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
166 if (r) {
167 dev_err(adev->dev,
168 "(%d) ring trail_fence_offs wb alloc failed\n", r);
169 goto error_ids;
170 }
171 adev->mes.sch_ctx_gpu_addr =
172 adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
173 adev->mes.sch_ctx_ptr =
174 (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
175
176 r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
177 if (r) {
178 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
179 dev_err(adev->dev,
180 "(%d) query_status_fence_offs wb alloc failed\n", r);
181 goto error_ids;
182 }
183 adev->mes.query_status_fence_gpu_addr =
184 adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
185 adev->mes.query_status_fence_ptr =
186 (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
187
188 r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
189 if (r) {
190 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
191 amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
192 dev_err(adev->dev,
193 "(%d) read_val_offs alloc failed\n", r);
194 goto error_ids;
195 }
196 adev->mes.read_val_gpu_addr =
197 adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
198 adev->mes.read_val_ptr =
199 (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
200
201 r = amdgpu_mes_doorbell_init(adev);
202 if (r)
203 goto error;
204
205 r = amdgpu_mes_event_log_init(adev);
206 if (r)
207 goto error_doorbell;
208
209 return 0;
210
211error_doorbell:
212 amdgpu_mes_doorbell_free(adev);
213error:
214 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
215 amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
216 amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
217error_ids:
218 idr_destroy(&adev->mes.pasid_idr);
219 idr_destroy(&adev->mes.gang_id_idr);
220 idr_destroy(&adev->mes.queue_id_idr);
221 ida_destroy(&adev->mes.doorbell_ida);
222 mutex_destroy(&adev->mes.mutex_hidden);
223 return r;
224}
225
226void amdgpu_mes_fini(struct amdgpu_device *adev)
227{
228 amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
229 &adev->mes.event_log_gpu_addr,
230 &adev->mes.event_log_cpu_addr);
231
232 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
233 amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
234 amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
235 amdgpu_mes_doorbell_free(adev);
236
237 idr_destroy(&adev->mes.pasid_idr);
238 idr_destroy(&adev->mes.gang_id_idr);
239 idr_destroy(&adev->mes.queue_id_idr);
240 ida_destroy(&adev->mes.doorbell_ida);
241 mutex_destroy(&adev->mes.mutex_hidden);
242}
243
244static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
245{
246 amdgpu_bo_free_kernel(&q->mqd_obj,
247 &q->mqd_gpu_addr,
248 &q->mqd_cpu_ptr);
249}
250
251int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
252 struct amdgpu_vm *vm)
253{
254 struct amdgpu_mes_process *process;
255 int r;
256
257 /* allocate the mes process buffer */
258 process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
259 if (!process) {
260 DRM_ERROR("no more memory to create mes process\n");
261 return -ENOMEM;
262 }
263
264 /* allocate the process context bo and map it */
265 r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
266 AMDGPU_GEM_DOMAIN_GTT,
267 &process->proc_ctx_bo,
268 &process->proc_ctx_gpu_addr,
269 &process->proc_ctx_cpu_ptr);
270 if (r) {
271 DRM_ERROR("failed to allocate process context bo\n");
272 goto clean_up_memory;
273 }
274 memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
275
276 /*
277 * Avoid taking any other locks under MES lock to avoid circular
278 * lock dependencies.
279 */
280 amdgpu_mes_lock(&adev->mes);
281
282 /* add the mes process to idr list */
283 r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
284 GFP_KERNEL);
285 if (r < 0) {
286 DRM_ERROR("failed to lock pasid=%d\n", pasid);
287 goto clean_up_ctx;
288 }
289
290 INIT_LIST_HEAD(&process->gang_list);
291 process->vm = vm;
292 process->pasid = pasid;
293 process->process_quantum = adev->mes.default_process_quantum;
294 process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
295
296 amdgpu_mes_unlock(&adev->mes);
297 return 0;
298
299clean_up_ctx:
300 amdgpu_mes_unlock(&adev->mes);
301 amdgpu_bo_free_kernel(&process->proc_ctx_bo,
302 &process->proc_ctx_gpu_addr,
303 &process->proc_ctx_cpu_ptr);
304clean_up_memory:
305 kfree(process);
306 return r;
307}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}
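
/*
 * Note on the teardown ordering above: hardware queues are removed and the
 * idr entries dropped while the MES lock is held, but the backing memory is
 * only freed after the lock is released. This keeps BO and allocator locks
 * from ever nesting under the MES lock, matching the "no other locks under
 * MES lock" rule stated throughout this file.
 */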

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}
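
/*
 * Illustrative sketch (not part of the driver): filling a minimal
 * amdgpu_mes_gang_properties before calling amdgpu_mes_add_gang(), modelled
 * on amdgpu_mes_test_create_gang_and_queues() below. Leaving gang_quantum
 * at 0 makes amdgpu_mes_add_gang() fall back to
 * adev->mes.default_gang_quantum.
 *
 *	struct amdgpu_mes_gang_properties gprops = {0};
 *	int r, gang_id;
 *
 *	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 *	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 *	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 *	r = amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
 */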

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	/* zero-initialized: no per-gang fields are filled in on this path */
	struct mes_suspend_gang_input input = {0};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	/* zero-initialized: no per-gang fields are filled in on this path */
	struct mes_resume_gang_input input = {0};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}
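
/*
 * Note: the suspend/resume walkers above ask the MES firmware to quiesce or
 * restart every gang of every known pasid while holding the MES lock. The
 * input structs were previously passed uninitialized; they are now
 * zero-initialized so the firmware sees deterministic input, since no
 * per-gang fields are filled in on this path.
 */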

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	/* the BO stays reserved; amdgpu_mes_queue_init_mqd() unreserves it */
	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}
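
/*
 * Note: mqd_prop.hqd_active is deliberately left false above; the queue
 * only becomes active once MES maps it in amdgpu_mes_add_hw_queue(). The
 * srbm_mutex/amdgpu_gfx_select_me_pipe_q() bracketing is needed only for
 * GFX and compute MQDs, whose init_mqd callbacks touch per-pipe register
 * state while building the MQD image.
 */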

int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}
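
/*
 * Illustrative sketch (not part of the driver): the queue properties that
 * amdgpu_mes_add_hw_queue() consumes are normally derived from a ring by
 * amdgpu_mes_ring_to_queue_props() further down; a hand-rolled caller would
 * have to supply at least the fields below. The *_gpu_addr and ring_size
 * names are placeholders, not driver symbols.
 *
 *	struct amdgpu_mes_queue_properties qprops = {0};
 *
 *	qprops.queue_type = AMDGPU_RING_TYPE_COMPUTE;
 *	qprops.hqd_base_gpu_addr = ring_gpu_addr;
 *	qprops.rptr_gpu_addr = rptr_gpu_addr;
 *	qprops.wptr_gpu_addr = wptr_gpu_addr;
 *	qprops.queue_size = ring_size;
 *	qprops.eop_gpu_addr = eop_gpu_addr;
 *	qprops.hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 *	qprops.hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
 *
 * qprops.doorbell_off is filled in by amdgpu_mes_add_hw_queue() itself.
 */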

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}
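
/*
 * Note: a "legacy" queue here is a driver-owned kernel ring that was mapped
 * outside the MES queue API above. The action argument selects how MES
 * takes the queue off the hardware, and gpu_addr/seq describe the trailing
 * fence MES writes so the caller can wait for the unmap to complete.
 */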

uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}
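
/*
 * Illustrative sketch (not part of the driver): amdgpu_mes_rreg() and
 * amdgpu_mes_wreg() tunnel register access through the MES firmware, which
 * is useful where direct MMIO access is unavailable (e.g. some SR-IOV
 * configurations). A hypothetical read-modify-write:
 *
 *	uint32_t v;
 *
 *	v = amdgpu_mes_rreg(adev, reg);
 *	amdgpu_mes_wreg(adev, reg, v | mask);
 */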

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}
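
/*
 * Illustrative sketch (not part of the driver): the returned byte offset is
 * normally turned into an address inside the per-context meta data BO with
 * the amdgpu_mes_ctx_get_offs_gpu_addr()/_cpu_addr() helpers, as the
 * eop_gpu_addr setup in amdgpu_mes_add_ring() below does:
 *
 *	int offs = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_RING_OFFS);
 *	u64 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offs);
 */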

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped above; do not unlock again */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}
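
/*
 * Illustrative sketch (not part of the driver): amdgpu_mes_add_ring() pairs
 * with amdgpu_mes_remove_ring(); amdgpu_mes_test_create_gang_and_queues()
 * below is the in-tree caller. A minimal use, assuming gang_id and ctx_data
 * were set up as in amdgpu_mes_self_test():
 *
 *	struct amdgpu_ring *ring;
 *
 *	r = amdgpu_mes_add_ring(adev, gang_id, AMDGPU_RING_TYPE_COMPUTE, 0,
 *				ctx_data, &ring);
 *	if (!r)
 *		amdgpu_mes_remove_ring(adev, ring);
 */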

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}
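
/*
 * The drm_exec_until_all_locked() loop above is the standard drm_exec
 * contention pattern: drm_exec_retry_on_contention() rolls back and replays
 * the block whenever locking the meta data BO or the VM page directory
 * contends, so the function only proceeds once both objects are locked or a
 * real error escapes the loop.
 */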

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}

		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* The MES firmware on GFX v10.3 parts does not support
		 * mapping SDMA queues, so skip the SDMA case there.
		 */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}
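
/*
 * Example of the names this resolves to, assuming a GC 11.0.0 part where
 * ucode_prefix decodes to "gc_11_0_0":
 *
 *	amdgpu/gc_11_0_0_mes_2.bin	(scheduler pipe)
 *	amdgpu/gc_11_0_0_mes1.bin	(KIQ pipe)
 *	amdgpu/gc_11_0_0_mes.bin	(scheduler-pipe fallback)
 */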

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, PAGE_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_mes_event_log", 0444, root,
			    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}
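
/*
 * Usage note, assuming debugfs is mounted at the default location and the
 * device is DRM minor 0: the file registered above can be dumped with
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_mes_event_log
 *
 * which hex-dumps the first page of the MES event log buffer.
 */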