/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

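/*
 * Each MES process owns a page-aligned slice of the doorbell BAR:
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS doorbells of
 * AMDGPU_ONE_DOORBELL_SIZE bytes each (1024 * 8 bytes = 8 KiB), rounded
 * up to PAGE_SIZE.
 */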
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

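/*
 * Reserve a doorbell slice for a process. ida_simple_get() hands out ids
 * in [2, max_doorbell_slices), so slices 0 and 1 are never given to a
 * process. Note the return convention: a positive slice index on success
 * (also stored in *doorbell_index), a negative errno on failure. A
 * minimal caller sketch:
 *
 *	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
 *	if (r < 0)
 *		goto err;
 */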
int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				       unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				       unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

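/*
 * Translate a (process slice, per-process doorbell id) pair into a dword
 * offset inside the doorbell BAR. Doorbells are 64-bit, hence the "* 2"
 * dword stride per doorbell id. For example, slice 2 with doorbell id 3
 * yields (2 * slice_size) / 4 + 6 dwords from the start of the BAR.
 */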
unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

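/*
 * Pick a free doorbell id from the per-process bitmap. SDMA queues are
 * special-cased: the search starts at the first SDMA engine's doorbell
 * index instead of bit 0; all other ring types simply take the first
 * free bit.
 */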
static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		      (process->doorbell_index *
		       amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}

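/*
 * amdgpu_mes_doorbell_init() carves the doorbell BAR up as follows, from
 * low to high offsets:
 *
 *   [0 .. max_assignment]      doorbells used by the rest of the driver
 *   aggregated doorbell page   one qword doorbell per MES priority level
 *   process slices             one slice per MES process, slice-aligned
 *
 * max_doorbell_slices, derived here, is what bounds
 * amdgpu_mes_alloc_process_doorbells() above.
 */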
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;
	size_t aggregated_doorbell_start;
	int i;

	aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	aggregated_doorbell_start =
		roundup(aggregated_doorbell_start, PAGE_SIZE);

	doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
		rounddown(doorbell_aperture_size,
			  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	/* allocate Qword range for aggregated doorbell */
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.aggregated_doorbells[i] =
			aggregated_doorbell_start / sizeof(u32) + i * 2;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

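/*
 * One-time MES software init: ID allocators and locks, the static HQD and
 * VMID masks that get reported to the MES firmware, three writeback slots
 * (scheduler context, query-status fence, register read-back) and the
 * doorbell layout. The firmware/hardware side is brought up later by the
 * IP-version-specific MES code.
 */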
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

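/*
 * Create the driver-side state for a MES process: doorbell bitmap, a GTT
 * buffer for the firmware-visible process context, a pasid idr entry and
 * a doorbell slice. All memory is allocated before the MES lock is taken
 * so that no allocation happens inside the locked section.
 */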
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
clean_up_ctx:
	/* both error paths that land here still hold the MES lock */
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

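/*
 * Teardown runs in two phases: under the MES lock every queue is removed
 * from the firmware and all ids are unpublished; then, with the lock
 * dropped, the buffers are freed. Freeing buffer objects may block, so it
 * is kept outside the locked section.
 */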
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

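/*
 * Adding a hardware queue: allocate the queue struct and its MQD first,
 * then, under the MES lock, publish a queue id, grab a doorbell, fill in
 * the MQD and hand the full description to the firmware through
 * mes.funcs->add_hw_queue(). The error labels unwind those steps in
 * reverse order.
 */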
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* zero the input so no stale stack data reaches the firmware */
	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

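/*
 * Register accesses routed through the MES firmware, for cases where
 * direct MMIO access is not possible (e.g. under SR-IOV). Reads go via a
 * scratch buffer (read_val_gpu_addr) that the firmware fills in and the
 * driver reads back through read_val_ptr.
 */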
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg wait\n");

error:
	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

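/*
 * Helper macro for amdgpu_mes_ctx_get_offs(): maps a slot id on a given
 * ring to a byte offset inside struct amdgpu_mes_ctx_meta_data. It is a
 * macro rather than a function because the engine array member (_eng)
 * differs per ring type.
 */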
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock is already dropped when add_hw_queue fails */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

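/*
 * Map the context meta data buffer into a VM: reserve the BO together
 * with the page directory, create a read/write/executable mapping at
 * meta_data_gpu_addr, then wait for both the mapping and the page table
 * updates to complete before returning.
 */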
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	long r = 0;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return r;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

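/*
 * Self test: build a throwaway process with its own PASID and VM, create
 * one gang per ring type with the maximum ring count, run ring and IB
 * tests on each, then tear everything down. The function always returns
 * 0, so a failing self test is logged but never fails driver init.
 */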
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS },
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS },
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues yet. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

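/*
 * Allocate a doorbell for a MES kernel queue. In this revision the
 * per-process doorbell slices are gone: all kernel queues share one
 * page-sized doorbell object, tracked by a single bitmap in struct
 * amdgpu_mes. db_start_dw_offset is the dword offset of that object
 * inside the doorbell BAR.
 */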
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) query_status_fence_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs");

	return r;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
 struct amdgpu_mes_queue *q,
 struct amdgpu_mes_queue_properties *p)
{
 struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 u32 mqd_size = mqd_mgr->mqd_size;
 int r;

 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
 AMDGPU_GEM_DOMAIN_GTT,
 &q->mqd_obj,
 &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
 if (r) {
 dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
 return r;
 }
 memset(q->mqd_cpu_ptr, 0, mqd_size);

 r = amdgpu_bo_reserve(q->mqd_obj, false);
 if (unlikely(r != 0))
 goto clean_up;

 return 0;

clean_up:
 amdgpu_bo_free_kernel(&q->mqd_obj,
 &q->mqd_gpu_addr,
 &q->mqd_cpu_ptr);
 return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
 struct amdgpu_mes_queue *q,
 struct amdgpu_mes_queue_properties *p)
{
 struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
 struct amdgpu_mqd_prop mqd_prop = {0};

 mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
 mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
 mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
 mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
 mqd_prop.queue_size = p->queue_size;
 mqd_prop.use_doorbell = true;
 mqd_prop.doorbell_index = p->doorbell_off;
 mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
 mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
 mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
 mqd_prop.hqd_active = false;

 if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
 p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 mutex_lock(&adev->srbm_mutex);
 amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
 }

 mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

 if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
 p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
 mutex_unlock(&adev->srbm_mutex);
 }

 amdgpu_bo_unreserve(q->mqd_obj);
}

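/*
 * amdgpu_mes_add_hw_queue - create a hardware queue and hand it to MES
 *
 * Allocates the queue bookkeeping and its MQD, assigns a kernel
 * doorbell, then asks the MES firmware to schedule the queue on the
 * gang identified by @gang_id. Failures unwind in reverse order of
 * setup.
 */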
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
 struct amdgpu_mes_queue_properties *qprops,
 int *queue_id)
{
 struct amdgpu_mes_queue *queue;
 struct amdgpu_mes_gang *gang;
 struct mes_add_queue_input queue_input;
 unsigned long flags;
 int r;

 memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

 /* allocate the mes queue buffer */
 queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
 if (!queue) {
 DRM_ERROR("Failed to allocate memory for queue\n");
 return -ENOMEM;
 }

 /* Allocate the queue mqd */
 r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
 if (r)
 goto clean_up_memory;

 /*
 * Avoid taking any other locks under MES lock to avoid circular
 * lock dependencies.
 */
 amdgpu_mes_lock(&adev->mes);

 gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 if (!gang) {
 DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 r = -EINVAL;
 goto clean_up_mqd;
 }

 /* add the mes queue to idr list */
 spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
 GFP_ATOMIC);
 if (r < 0) {
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 goto clean_up_mqd;
 }
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 *queue_id = queue->queue_id = r;

 /* allocate a doorbell index for the queue */
 r = amdgpu_mes_kernel_doorbell_get(adev,
 qprops->queue_type,
 &qprops->doorbell_off);
 if (r)
 goto clean_up_queue_id;

 /* initialize the queue mqd */
 amdgpu_mes_queue_init_mqd(adev, queue, qprops);

 /* add hw queue to mes */
 queue_input.process_id = gang->process->pasid;

 queue_input.page_table_base_addr =
 adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
 adev->gmc.vram_start;

 queue_input.process_va_start = 0;
 queue_input.process_va_end =
 (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
 queue_input.process_quantum = gang->process->process_quantum;
 queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
 queue_input.gang_quantum = gang->gang_quantum;
 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
 queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
 queue_input.gang_global_priority_level = gang->global_priority_level;
 queue_input.doorbell_offset = qprops->doorbell_off;
 queue_input.mqd_addr = queue->mqd_gpu_addr;
 queue_input.wptr_addr = qprops->wptr_gpu_addr;
 queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
 queue_input.queue_type = qprops->queue_type;
 queue_input.paging = qprops->paging;
 queue_input.is_kfd_process = 0;

 r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
 if (r) {
 DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
 qprops->doorbell_off);
 goto clean_up_doorbell;
 }

 DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
 "queue type=%d, doorbell=0x%llx\n",
 gang->process->pasid, gang_id, qprops->queue_type,
 qprops->doorbell_off);

 queue->ring = qprops->ring;
 queue->doorbell_off = qprops->doorbell_off;
 queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
 queue->queue_type = qprops->queue_type;
 queue->paging = qprops->paging;
 queue->gang = gang;
 queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
 list_add_tail(&queue->list, &gang->queue_list);

 amdgpu_mes_unlock(&adev->mes);
 return 0;

clean_up_doorbell:
 amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
 spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
 idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
 amdgpu_mes_unlock(&adev->mes);
 amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
 kfree(queue);
 return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
 unsigned long flags;
 struct amdgpu_mes_queue *queue;
 struct amdgpu_mes_gang *gang;
 struct mes_remove_queue_input queue_input;
 int r;

 /*
 * Avoid taking any other locks under MES lock to avoid circular
 * lock dependencies.
 */
 amdgpu_mes_lock(&adev->mes);

 /* remove the mes queue from idr list */
 spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

 queue = idr_find(&adev->mes.queue_id_idr, queue_id);
 if (!queue) {
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 amdgpu_mes_unlock(&adev->mes);
 DRM_ERROR("queue id %d doesn't exist\n", queue_id);
 return -EINVAL;
 }

 idr_remove(&adev->mes.queue_id_idr, queue_id);
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

 DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
 queue->doorbell_off);

 gang = queue->gang;
 queue_input.doorbell_offset = queue->doorbell_off;
 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

 r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
 if (r)
 DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
 queue_id);

 list_del(&queue->list);
 amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
 amdgpu_mes_unlock(&adev->mes);

 amdgpu_mes_queue_free_mqd(queue);
 kfree(queue);
 return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
 unsigned long flags;
 struct amdgpu_mes_queue *queue;
 struct amdgpu_mes_gang *gang;
 struct mes_reset_queue_input queue_input;
 int r;

 /*
 * Avoid taking any other locks under MES lock to avoid circular
 * lock dependencies.
 */
 amdgpu_mes_lock(&adev->mes);

 /* look up the mes queue in the idr list */
 spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

 queue = idr_find(&adev->mes.queue_id_idr, queue_id);
 if (!queue) {
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
 amdgpu_mes_unlock(&adev->mes);
 DRM_ERROR("queue id %d doesn't exist\n", queue_id);
 return -EINVAL;
 }
 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

 DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
 queue->doorbell_off);

 gang = queue->gang;
 queue_input.doorbell_offset = queue->doorbell_off;
 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

 r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
 if (r)
 DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
 queue_id);

 amdgpu_mes_unlock(&adev->mes);

 return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
 int me_id, int pipe_id, int queue_id, int vmid)
{
 struct mes_reset_queue_input queue_input;
 int r;

 /* zero the input like the other queue helpers do, so no stale
 * stack data reaches the firmware interface
 */
 memset(&queue_input, 0, sizeof(queue_input));

 queue_input.queue_type = queue_type;
 queue_input.use_mmio = true;
 queue_input.me_id = me_id;
 queue_input.pipe_id = pipe_id;
 queue_input.queue_id = queue_id;
 queue_input.vmid = vmid;
 r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
 if (r)
 DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
 queue_id);
 return r;
}

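/*
 * "Legacy" queues below are the driver's own kernel rings (gfx,
 * compute, sdma) that are mapped and unmapped through MES rather than
 * created by it.
 */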
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
 struct amdgpu_ring *ring)
{
 struct mes_map_legacy_queue_input queue_input;
 int r;

 memset(&queue_input, 0, sizeof(queue_input));

 queue_input.queue_type = ring->funcs->type;
 queue_input.doorbell_offset = ring->doorbell_index;
 queue_input.pipe_id = ring->pipe;
 queue_input.queue_id = ring->queue;
 queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
 queue_input.wptr_addr = ring->wptr_gpu_addr;

 r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
 if (r)
 DRM_ERROR("failed to map legacy queue\n");

 return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 enum amdgpu_unmap_queues_action action,
 u64 gpu_addr, u64 seq)
{
 struct mes_unmap_legacy_queue_input queue_input;
 int r;

 queue_input.action = action;
 queue_input.queue_type = ring->funcs->type;
 queue_input.doorbell_offset = ring->doorbell_index;
 queue_input.pipe_id = ring->pipe;
 queue_input.queue_id = ring->queue;
 queue_input.trail_fence_addr = gpu_addr;
 queue_input.trail_fence_data = seq;

 r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
 if (r)
 DRM_ERROR("failed to unmap legacy queue\n");

 return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 unsigned int vmid,
 bool use_mmio)
{
 struct mes_reset_legacy_queue_input queue_input;
 int r;

 memset(&queue_input, 0, sizeof(queue_input));

 queue_input.queue_type = ring->funcs->type;
 queue_input.doorbell_offset = ring->doorbell_index;
 queue_input.me_id = ring->me;
 queue_input.pipe_id = ring->pipe;
 queue_input.queue_id = ring->queue;
 queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
 queue_input.wptr_addr = ring->wptr_gpu_addr;
 queue_input.vmid = vmid;
 queue_input.use_mmio = use_mmio;

 r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
 if (r)
 DRM_ERROR("failed to reset legacy queue\n");

 return r;
}

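/*
 * Register access routed through MES: a read is serviced by the
 * firmware writing the value into a writeback (WB) slot borrowed from
 * the device WB pool for the duration of the call. Illustrative use
 * (a sketch; "reg" is any register dword offset):
 *
 *   uint32_t v = amdgpu_mes_rreg(adev, reg);
 *
 *   amdgpu_mes_wreg(adev, reg, v | 0x1);
 */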
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
 struct mes_misc_op_input op_input;
 uint32_t val = 0;
 uint32_t addr_offset = 0;
 uint64_t read_val_gpu_addr;
 uint32_t *read_val_ptr;
 int r;

 if (amdgpu_device_wb_get(adev, &addr_offset)) {
 DRM_ERROR("critical bug! too many mes readers\n");
 goto error;
 }
 read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
 read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
 op_input.op = MES_MISC_OP_READ_REG;
 op_input.read_reg.reg_offset = reg;
 op_input.read_reg.buffer_addr = read_val_gpu_addr;

 if (!adev->mes.funcs->misc_op) {
 DRM_ERROR("mes rreg is not supported!\n");
 goto error;
 }

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 DRM_ERROR("failed to read reg (0x%x)\n", reg);
 else
 val = *read_val_ptr;

error:
 if (addr_offset)
 amdgpu_device_wb_free(adev, addr_offset);
 return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
 uint32_t reg, uint32_t val)
{
 struct mes_misc_op_input op_input;
 int r;

 op_input.op = MES_MISC_OP_WRITE_REG;
 op_input.write_reg.reg_offset = reg;
 op_input.write_reg.reg_value = val;

 if (!adev->mes.funcs->misc_op) {
 DRM_ERROR("mes wreg is not supported!\n");
 r = -EINVAL;
 goto error;
 }

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
 return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
 uint32_t reg0, uint32_t reg1,
 uint32_t ref, uint32_t mask)
{
 struct mes_misc_op_input op_input;
 int r;

 op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
 op_input.wrm_reg.reg0 = reg0;
 op_input.wrm_reg.reg1 = reg1;
 op_input.wrm_reg.ref = ref;
 op_input.wrm_reg.mask = mask;

 if (!adev->mes.funcs->misc_op) {
 DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
 r = -EINVAL;
 goto error;
 }

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 DRM_ERROR("failed to reg_write_reg_wait\n");

error:
 return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
 uint32_t val, uint32_t mask)
{
 struct mes_misc_op_input op_input;
 int r;

 op_input.op = MES_MISC_OP_WRM_REG_WAIT;
 op_input.wrm_reg.reg0 = reg;
 op_input.wrm_reg.ref = val;
 op_input.wrm_reg.mask = mask;

 if (!adev->mes.funcs->misc_op) {
 DRM_ERROR("mes reg wait is not supported!\n");
 r = -EINVAL;
 goto error;
 }

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 DRM_ERROR("failed to reg_wait\n");

error:
 return r;
}

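/*
 * trap_en is only forwarded to firmware exposing MES API version 14 or
 * newer (see the sched_version check below); for older firmware the
 * field is left unset.
 */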
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
 uint64_t process_context_addr,
 uint32_t spi_gdbg_per_vmid_cntl,
 const uint32_t *tcp_watch_cntl,
 uint32_t flags,
 bool trap_en)
{
 struct mes_misc_op_input op_input = {0};
 int r;

 if (!adev->mes.funcs->misc_op) {
 DRM_ERROR("mes set shader debugger is not supported!\n");
 return -EINVAL;
 }

 op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
 op_input.set_shader_debugger.process_context_addr = process_context_addr;
 op_input.set_shader_debugger.flags.u32all = flags;

 /* use amdgpu_mes_flush_shader_debugger instead */
 if (op_input.set_shader_debugger.flags.process_ctx_flush)
 return -EINVAL;

 op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
 memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
 sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

 if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
 AMDGPU_MES_API_VERSION_SHIFT) >= 14)
 op_input.set_shader_debugger.trap_en = trap_en;

 amdgpu_mes_lock(&adev->mes);

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 DRM_ERROR("failed to set_shader_debugger\n");

 amdgpu_mes_unlock(&adev->mes);

 return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
 uint64_t process_context_addr)
{
 struct mes_misc_op_input op_input = {0};
 int r;

 if (!adev->mes.funcs->misc_op) {
 DRM_ERROR("mes flush shader debugger is not supported!\n");
 return -EINVAL;
 }

 op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
 op_input.set_shader_debugger.process_context_addr = process_context_addr;
 op_input.set_shader_debugger.flags.process_ctx_flush = true;

 amdgpu_mes_lock(&adev->mes);

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 DRM_ERROR("failed to flush_shader_debugger\n");

 amdgpu_mes_unlock(&adev->mes);

 return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
 struct amdgpu_ring *ring,
 struct amdgpu_mes_queue_properties *props)
{
 props->queue_type = ring->funcs->type;
 props->hqd_base_gpu_addr = ring->gpu_addr;
 props->rptr_gpu_addr = ring->rptr_gpu_addr;
 props->wptr_gpu_addr = ring->wptr_gpu_addr;
 props->wptr_mc_addr =
 ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
 props->queue_size = ring->ring_size;
 props->eop_gpu_addr = ring->eop_gpu_addr;
 props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
 props->paging = false;
 props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
do { \
 if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
 return offsetof(struct amdgpu_mes_ctx_meta_data, \
 _eng[ring->idx].slots[id_offs]); \
 else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
 return offsetof(struct amdgpu_mes_ctx_meta_data, \
 _eng[ring->idx].ring); \
 else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
 return offsetof(struct amdgpu_mes_ctx_meta_data, \
 _eng[ring->idx].ib); \
 else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
 return offsetof(struct amdgpu_mes_ctx_meta_data, \
 _eng[ring->idx].padding); \
} while (0)

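/*
 * For example, for a gfx ring with ring->idx == 0 and id_offs == 2
 * (i.e. below AMDGPU_MES_CTX_MAX_OFFS), the macro above evaluates to:
 *
 *   return offsetof(struct amdgpu_mes_ctx_meta_data, gfx[0].slots[2]);
 */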
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
 switch (ring->funcs->type) {
 case AMDGPU_RING_TYPE_GFX:
 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
 break;
 case AMDGPU_RING_TYPE_COMPUTE:
 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
 break;
 case AMDGPU_RING_TYPE_SDMA:
 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
 break;
 default:
 break;
 }

 WARN_ON(1);
 return -EINVAL;
}

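/*
 * amdgpu_mes_add_ring - back a software ring with a MES hardware queue
 *
 * The new ring borrows funcs/me/pipe from the first existing ring of
 * the requested type and keeps its ring buffer, rptr/wptr and IB slots
 * inside the per-context meta data BO.
 */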
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
 int queue_type, int idx,
 struct amdgpu_mes_ctx_data *ctx_data,
 struct amdgpu_ring **out)
{
 struct amdgpu_ring *ring;
 struct amdgpu_mes_gang *gang;
 struct amdgpu_mes_queue_properties qprops = {0};
 int r, queue_id, pasid;

 /*
 * Avoid taking any other locks under MES lock to avoid circular
 * lock dependencies.
 */
 amdgpu_mes_lock(&adev->mes);
 gang = idr_find(&adev->mes.gang_id_idr, gang_id);
 if (!gang) {
 DRM_ERROR("gang id %d doesn't exist\n", gang_id);
 amdgpu_mes_unlock(&adev->mes);
 return -EINVAL;
 }
 pasid = gang->process->pasid;

 ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
 if (!ring) {
 amdgpu_mes_unlock(&adev->mes);
 return -ENOMEM;
 }

 ring->ring_obj = NULL;
 ring->use_doorbell = true;
 ring->is_mes_queue = true;
 ring->mes_ctx = ctx_data;
 ring->idx = idx;
 ring->no_scheduler = true;

 if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
 int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
 compute[ring->idx].mec_hpd);
 ring->eop_gpu_addr =
 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
 }

 switch (queue_type) {
 case AMDGPU_RING_TYPE_GFX:
 ring->funcs = adev->gfx.gfx_ring[0].funcs;
 ring->me = adev->gfx.gfx_ring[0].me;
 ring->pipe = adev->gfx.gfx_ring[0].pipe;
 break;
 case AMDGPU_RING_TYPE_COMPUTE:
 ring->funcs = adev->gfx.compute_ring[0].funcs;
 ring->me = adev->gfx.compute_ring[0].me;
 ring->pipe = adev->gfx.compute_ring[0].pipe;
 break;
 case AMDGPU_RING_TYPE_SDMA:
 ring->funcs = adev->sdma.instance[0].ring.funcs;
 break;
 default:
 BUG();
 }

 r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
 AMDGPU_RING_PRIO_DEFAULT, NULL);
 if (r) {
 amdgpu_mes_unlock(&adev->mes);
 goto clean_up_memory;
 }

 amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

 dma_fence_wait(gang->process->vm->last_update, false);
 dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
 amdgpu_mes_unlock(&adev->mes);

 r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
 if (r)
 goto clean_up_ring;

 ring->hw_queue_id = queue_id;
 ring->doorbell_index = qprops.doorbell_off;

 if (queue_type == AMDGPU_RING_TYPE_GFX)
 sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
 else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
 sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
 queue_id);
 else if (queue_type == AMDGPU_RING_TYPE_SDMA)
 sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
 queue_id);
 else
 BUG();

 *out = ring;
 return 0;

clean_up_ring:
 amdgpu_ring_fini(ring);
clean_up_memory:
 kfree(ring);
 return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
 struct amdgpu_ring *ring)
{
 if (!ring)
 return;

 amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
 del_timer_sync(&ring->fence_drv.fallback_timer);
 amdgpu_ring_fini(ring);
 kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
 enum amdgpu_mes_priority_level prio)
{
 return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
 struct amdgpu_mes_ctx_data *ctx_data)
{
 int r;

 r = amdgpu_bo_create_kernel(adev,
 sizeof(struct amdgpu_mes_ctx_meta_data),
 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 &ctx_data->meta_data_obj,
 &ctx_data->meta_data_mc_addr,
 &ctx_data->meta_data_ptr);
 if (r) {
 dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
 return r;
 }

 if (!ctx_data->meta_data_obj)
 return -ENOMEM;

 memset(ctx_data->meta_data_ptr, 0,
 sizeof(struct amdgpu_mes_ctx_meta_data));

 return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
 if (ctx_data->meta_data_obj)
 amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
 &ctx_data->meta_data_mc_addr,
 &ctx_data->meta_data_ptr);
}

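/*
 * Mapping uses the drm_exec retry loop: the meta data BO and the VM
 * page directory are locked together, and the loop restarts on
 * contention so the lock ordering between the two cannot deadlock.
 */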
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 struct amdgpu_vm *vm,
 struct amdgpu_mes_ctx_data *ctx_data)
{
 struct amdgpu_bo_va *bo_va;
 struct amdgpu_sync sync;
 struct drm_exec exec;
 int r;

 amdgpu_sync_create(&sync);

 drm_exec_init(&exec, 0, 0);
 drm_exec_until_all_locked(&exec) {
 r = drm_exec_lock_obj(&exec,
 &ctx_data->meta_data_obj->tbo.base);
 drm_exec_retry_on_contention(&exec);
 if (unlikely(r))
 goto error_fini_exec;

 r = amdgpu_vm_lock_pd(vm, &exec, 0);
 drm_exec_retry_on_contention(&exec);
 if (unlikely(r))
 goto error_fini_exec;
 }

 bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
 if (!bo_va) {
 DRM_ERROR("failed to create bo_va for meta data BO\n");
 r = -ENOMEM;
 goto error_fini_exec;
 }

 r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
 sizeof(struct amdgpu_mes_ctx_meta_data),
 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 AMDGPU_PTE_EXECUTABLE);

 if (r) {
 DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
 goto error_del_bo_va;
 }

 r = amdgpu_vm_bo_update(adev, bo_va, false);
 if (r) {
 DRM_ERROR("failed to do vm_bo_update on meta data\n");
 goto error_del_bo_va;
 }
 amdgpu_sync_fence(&sync, bo_va->last_pt_update);

 r = amdgpu_vm_update_pdes(adev, vm, false);
 if (r) {
 DRM_ERROR("failed to update pdes on meta data\n");
 goto error_del_bo_va;
 }
 amdgpu_sync_fence(&sync, vm->last_update);

 amdgpu_sync_wait(&sync, false);
 drm_exec_fini(&exec);

 amdgpu_sync_free(&sync);
 ctx_data->meta_data_va = bo_va;
 return 0;

error_del_bo_va:
 amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
 drm_exec_fini(&exec);
 amdgpu_sync_free(&sync);
 return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 struct amdgpu_mes_ctx_data *ctx_data)
{
 struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
 struct amdgpu_bo *bo = ctx_data->meta_data_obj;
 struct amdgpu_vm *vm = bo_va->base.vm;
 struct dma_fence *fence;
 struct drm_exec exec;
 long r;

 drm_exec_init(&exec, 0, 0);
 drm_exec_until_all_locked(&exec) {
 r = drm_exec_lock_obj(&exec,
 &ctx_data->meta_data_obj->tbo.base);
 drm_exec_retry_on_contention(&exec);
 if (unlikely(r))
 goto out_unlock;

 r = amdgpu_vm_lock_pd(vm, &exec, 0);
 drm_exec_retry_on_contention(&exec);
 if (unlikely(r))
 goto out_unlock;
 }

 amdgpu_vm_bo_del(adev, bo_va);
 if (!amdgpu_vm_ready(vm))
 goto out_unlock;

 r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
 &fence);
 if (r)
 goto out_unlock;
 if (fence) {
 amdgpu_bo_fence(bo, fence, true);
 fence = NULL;
 }

 r = amdgpu_vm_clear_freed(adev, vm, &fence);
 if (r || !fence)
 goto out_unlock;

 dma_fence_wait(fence, false);
 amdgpu_bo_fence(bo, fence, true);
 dma_fence_put(fence);

out_unlock:
 if (unlikely(r < 0))
 dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
 drm_exec_fini(&exec);

 return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
 int pasid, int *gang_id,
 int queue_type, int num_queue,
 struct amdgpu_ring **added_rings,
 struct amdgpu_mes_ctx_data *ctx_data)
{
 struct amdgpu_ring *ring;
 struct amdgpu_mes_gang_properties gprops = {0};
 int r, j;

 /* create a gang for the process */
 gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 gprops.gang_quantum = adev->mes.default_gang_quantum;
 gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

 r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
 if (r) {
 DRM_ERROR("failed to add gang\n");
 return r;
 }

 /* create queues for the gang */
 for (j = 0; j < num_queue; j++) {
 r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
 ctx_data, &ring);
 if (r) {
 DRM_ERROR("failed to add ring\n");
 break;
 }

 DRM_INFO("ring %s was added\n", ring->name);
 added_rings[j] = ring;
 }

 return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
 struct amdgpu_ring *ring;
 int i, r;

 for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
 ring = added_rings[i];
 if (!ring)
 continue;

 r = amdgpu_ring_test_helper(ring);
 if (r)
 return r;

 r = amdgpu_ring_test_ib(ring, 1000 * 10);
 if (r) {
 DRM_DEV_ERROR(ring->adev->dev,
 "ring %s ib test failed (%d)\n",
 ring->name, r);
 return r;
 } else {
 DRM_INFO("ring %s ib test passed\n", ring->name);
 }
 }

 return 0;
}

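/*
 * Self test: create a throwaway VM and MES process, add one gang per
 * queue type with a single queue each, run ring and IB tests on every
 * queue, then tear everything down again.
 */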
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
 struct amdgpu_vm *vm = NULL;
 struct amdgpu_mes_ctx_data ctx_data = {0};
 struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
 int gang_ids[3] = {0};
 int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
 { AMDGPU_RING_TYPE_COMPUTE, 1 },
 { AMDGPU_RING_TYPE_SDMA, 1 } };
 int i, r, pasid, k = 0;

 pasid = amdgpu_pasid_alloc(16);
 if (pasid < 0) {
 dev_warn(adev->dev, "No more PASIDs available!\n");
 pasid = 0;
 }

 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
 if (!vm) {
 r = -ENOMEM;
 goto error_pasid;
 }

 r = amdgpu_vm_init(adev, vm, -1);
 if (r) {
 DRM_ERROR("failed to initialize vm\n");
 goto error_pasid;
 }

 r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
 if (r) {
 DRM_ERROR("failed to alloc ctx meta data\n");
 goto error_fini;
 }

 ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
 r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
 if (r) {
 DRM_ERROR("failed to map ctx meta data\n");
 goto error_vm;
 }

 r = amdgpu_mes_create_process(adev, pasid, vm);
 if (r) {
 DRM_ERROR("failed to create MES process\n");
 goto error_vm;
 }

 for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
 /* On GFX v10.3, the firmware cannot map SDMA queues yet. */
 if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
 IP_VERSION(10, 3, 0) &&
 amdgpu_ip_version(adev, GC_HWIP, 0) <
 IP_VERSION(11, 0, 0) &&
 queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
 continue;

 r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
 &gang_ids[i],
 queue_types[i][0],
 queue_types[i][1],
 &added_rings[k],
 &ctx_data);
 if (r)
 goto error_queues;

 k += queue_types[i][1];
 }

 /* start ring test and ib test for MES queues */
 amdgpu_mes_test_queues(added_rings);

error_queues:
 /* remove all queues */
 for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
 if (!added_rings[i])
 continue;
 amdgpu_mes_remove_ring(adev, added_rings[i]);
 }

 for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
 if (!gang_ids[i])
 continue;
 amdgpu_mes_remove_gang(adev, gang_ids[i]);
 }

 amdgpu_mes_destroy_process(adev, pasid);

error_vm:
 amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
 amdgpu_vm_fini(adev, vm);

error_pasid:
 if (pasid)
 amdgpu_pasid_free(pasid);

 amdgpu_mes_ctx_free_meta_data(&ctx_data);
 kfree(vm);
 return 0;
}

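/*
 * Firmware file naming below, using "gc_11_0_0" as an illustrative
 * ucode prefix:
 *
 *   amdgpu/gc_11_0_0_mes_2.bin  - scheduler pipe on GC 11.x
 *   amdgpu/gc_11_0_0_mes1.bin   - KIQ pipe on GC 11.x
 *   amdgpu/gc_11_0_0_mes.bin    - scheduler-pipe fallback
 *
 * Unified MES parts use a "_uni_mes.bin" suffix instead.
 */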
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
 const struct mes_firmware_header_v1_0 *mes_hdr;
 struct amdgpu_firmware_info *info;
 char ucode_prefix[30];
 char fw_name[50];
 bool need_retry = false;
 u32 *ucode_ptr;
 int r;

 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
 sizeof(ucode_prefix));
 if (adev->enable_uni_mes) {
 snprintf(fw_name, sizeof(fw_name),
 "amdgpu/%s_uni_mes.bin", ucode_prefix);
 } else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
 ucode_prefix,
 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
 need_retry = true;
 } else {
 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
 ucode_prefix,
 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
 }

 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
 if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
 dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
 "amdgpu/%s_mes.bin", ucode_prefix);
 }

 if (r)
 goto out;

 mes_hdr = (const struct mes_firmware_header_v1_0 *)
 adev->mes.fw[pipe]->data;
 adev->mes.uc_start_addr[pipe] =
 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
 adev->mes.data_start_addr[pipe] =
 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
 ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
 sizeof(union amdgpu_firmware_header));
 adev->mes.fw_version[pipe] =
 le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 int ucode, ucode_data;

 if (pipe == AMDGPU_MES_SCHED_PIPE) {
 ucode = AMDGPU_UCODE_ID_CP_MES;
 ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
 } else {
 ucode = AMDGPU_UCODE_ID_CP_MES1;
 ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
 }

 info = &adev->firmware.ucode[ucode];
 info->ucode_id = ucode;
 info->fw = adev->mes.fw[pipe];
 adev->firmware.fw_size +=
 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
 PAGE_SIZE);

 info = &adev->firmware.ucode[ucode_data];
 info->ucode_id = ucode_data;
 info->fw = adev->mes.fw[pipe];
 adev->firmware.fw_size +=
 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
 PAGE_SIZE);
 }

 return 0;
out:
 amdgpu_ucode_release(&adev->mes.fw[pipe]);
 return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
 uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
 bool is_supported = false;

 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
 mes_rev >= 0x63)
 is_supported = true;

 return is_supported;
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
 uint32_t node_id, bool enable)
{
 struct mes_misc_op_input op_input = {0};
 int r;

 op_input.op = MES_MISC_OP_CHANGE_CONFIG;
 op_input.change_config.option.limit_single_process = enable ? 1 : 0;

 if (!adev->mes.funcs->misc_op) {
 dev_err(adev->dev, "mes change config is not supported!\n");
 r = -EINVAL;
 goto error;
 }

 r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
 if (r)
 dev_err(adev->dev, "failed to change_config.\n");

error:
 return r;
}

int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
 int i, r = 0;

 if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
 mutex_lock(&adev->enforce_isolation_mutex);
 for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
 if (adev->enforce_isolation[i])
 r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
 else
 r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
 }
 mutex_unlock(&adev->enforce_isolation_mutex);
 }
 return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
 struct amdgpu_device *adev = m->private;
 uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

 seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
 mem, adev->mes.event_log_size, false);

 return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
 struct drm_minor *minor = adev_to_drm(adev)->primary;
 struct dentry *root = minor->debugfs_root;

 if (adev->enable_mes && amdgpu_mes_log_enable)
 debugfs_create_file("amdgpu_mes_event_log", 0444, root,
 adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}