1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "amdgpu_amdkfd.h"
24#include "amd_shared.h"
25
26#include "amdgpu.h"
27#include "amdgpu_gfx.h"
28#include "amdgpu_dma_buf.h"
29#include <linux/module.h>
30#include <linux/dma-buf.h>
31#include "amdgpu_xgmi.h"
32#include <uapi/linux/kfd_ioctl.h>
33
34/* Total memory size in system memory and all GPU VRAM. Used to
35 * estimate worst case amount of memory to reserve for page tables
36 */
37uint64_t amdgpu_amdkfd_total_mem_size;
38
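/*
 * One-time module init: record the usable low system memory
 * (totalram - totalhigh, scaled by mem_unit) for worst-case page-table
 * reservation estimates, then bring up the KFD core when CONFIG_HSA_AMD
 * is built in.
 */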
39int amdgpu_amdkfd_init(void)
40{
41 struct sysinfo si;
42 int ret;
43
44 si_meminfo(&si);
45 amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
46 amdgpu_amdkfd_total_mem_size *= si.mem_unit;
47
48#ifdef CONFIG_HSA_AMD
49 ret = kgd2kfd_init();
50 amdgpu_amdkfd_gpuvm_init_mem_limits();
51#else
52 ret = -ENOENT;
53#endif
54
55 return ret;
56}
57
58void amdgpu_amdkfd_fini(void)
59{
60 kgd2kfd_exit();
61}
62
63void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
64{
65 bool vf = amdgpu_sriov_vf(adev);
66
67 adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
68 adev->pdev, adev->asic_type, vf);
69
70 if (adev->kfd.dev)
71 amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
72}
73
74/**
75 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
76 * setup amdkfd
77 *
78 * @adev: amdgpu_device pointer
79 * @aperture_base: output returning doorbell aperture base physical address
80 * @aperture_size: output returning doorbell aperture size in bytes
81 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
82 *
83 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
84 * takes doorbells required for its own rings and reports the setup to amdkfd.
85 * amdgpu reserved doorbells are at the start of the doorbell aperture.
86 */
87static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
88 phys_addr_t *aperture_base,
89 size_t *aperture_size,
90 size_t *start_offset)
91{
92 /*
93 * The first num_doorbells are used by amdgpu.
94 * amdkfd takes whatever's left in the aperture.
95 */
96 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
97 *aperture_base = adev->doorbell.base;
98 *aperture_size = adev->doorbell.size;
99 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
100 } else {
101 *aperture_base = 0;
102 *aperture_size = 0;
103 *start_offset = 0;
104 }
105}
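/*
 * Example split (sizes assumed for illustration): with a 2 MiB doorbell
 * aperture and num_doorbells == 1024, amdgpu keeps the first
 * 1024 * sizeof(u32) = 4 KiB of doorbells and amdkfd gets everything from
 * that offset up to the end of the aperture.
 */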
106
107void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
108{
109 int i;
110 int last_valid_bit;
111
112 if (adev->kfd.dev) {
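		/*
		 * Worked example with typical values (assumed for illustration,
		 * not taken from this file): AMDGPU_NUM_VMID == 16 and
		 * first_kfd_vmid == 8 give compute_vmid_bitmap =
		 * 0xffff - 0x00ff = 0xff00, i.e. VMIDs 8..15 go to KFD while
		 * 0..7 stay with amdgpu.
		 */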
113 struct kgd2kfd_shared_resources gpu_resources = {
114 .compute_vmid_bitmap =
115 ((1 << AMDGPU_NUM_VMID) - 1) -
116 ((1 << adev->vm_manager.first_kfd_vmid) - 1),
117 .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
118 .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
119 .gpuvm_size = min(adev->vm_manager.max_pfn
120 << AMDGPU_GPU_PAGE_SHIFT,
121 AMDGPU_GMC_HOLE_START),
122 .drm_render_minor = adev->ddev->render->index,
123 .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
124
125 };
126
127 /* this is going to have a few of the MSBs set that we need to
128 * clear
129 */
130 bitmap_complement(gpu_resources.cp_queue_bitmap,
131 adev->gfx.mec.queue_bitmap,
132 KGD_MAX_QUEUES);
133
134 /* According to linux/bitmap.h we shouldn't use bitmap_clear if
135		 * nbits is not a compile-time constant
136 */
137 last_valid_bit = 1 /* only first MEC can have compute queues */
138 * adev->gfx.mec.num_pipe_per_mec
139 * adev->gfx.mec.num_queue_per_pipe;
140 for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
141 clear_bit(i, gpu_resources.cp_queue_bitmap);
142
143 amdgpu_doorbell_get_kfd_info(adev,
144 &gpu_resources.doorbell_physical_address,
145 &gpu_resources.doorbell_aperture_size,
146 &gpu_resources.doorbell_start_offset);
147
148 /* Since SOC15, BIF starts to statically use the
149 * lower 12 bits of doorbell addresses for routing
150 * based on settings in registers like
151		 * SDMA0_DOORBELL_RANGE etc.
152		 * In order to route a doorbell to the CP engine, the lower
153		 * 12 bits of its address have to be outside the range
154 * set for SDMA, VCN, and IH blocks.
155 */
156 if (adev->asic_type >= CHIP_VEGA10) {
157 gpu_resources.non_cp_doorbells_start =
158 adev->doorbell_index.first_non_cp;
159 gpu_resources.non_cp_doorbells_end =
160 adev->doorbell_index.last_non_cp;
161 }
162
163 kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
164 }
165}
166
167void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
168{
169 if (adev->kfd.dev) {
170 kgd2kfd_device_exit(adev->kfd.dev);
171 adev->kfd.dev = NULL;
172 }
173}
174
175void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
176 const void *ih_ring_entry)
177{
178 if (adev->kfd.dev)
179 kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
180}
181
182void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
183{
184 if (adev->kfd.dev)
185 kgd2kfd_suspend(adev->kfd.dev, run_pm);
186}
187
188int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
189{
190 int r = 0;
191
192 if (adev->kfd.dev)
193 r = kgd2kfd_resume(adev->kfd.dev, run_pm);
194
195 return r;
196}
197
198int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
199{
200 int r = 0;
201
202 if (adev->kfd.dev)
203 r = kgd2kfd_pre_reset(adev->kfd.dev);
204
205 return r;
206}
207
208int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
209{
210 int r = 0;
211
212 if (adev->kfd.dev)
213 r = kgd2kfd_post_reset(adev->kfd.dev);
214
215 return r;
216}
217
218void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
219{
220 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
221
222 if (amdgpu_device_should_recover_gpu(adev))
223 amdgpu_device_gpu_recover(adev, NULL);
224}
225
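/*
 * Allocate a page-aligned, pinned and kernel-mapped GTT BO for KFD (the
 * cp_mqd_gfx9 flag marks the BO as holding a CP MQD on GFX9).  On success
 * *mem_obj holds the amdgpu_bo, *gpu_addr its GART address and *cpu_ptr the
 * kernel mapping; undo all of it with amdgpu_amdkfd_free_gtt_mem().
 */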
226int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
227 void **mem_obj, uint64_t *gpu_addr,
228 void **cpu_ptr, bool cp_mqd_gfx9)
229{
230 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
231 struct amdgpu_bo *bo = NULL;
232 struct amdgpu_bo_param bp;
233 int r;
234 void *cpu_ptr_tmp = NULL;
235
236 memset(&bp, 0, sizeof(bp));
237 bp.size = size;
238 bp.byte_align = PAGE_SIZE;
239 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
240 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
241 bp.type = ttm_bo_type_kernel;
242 bp.resv = NULL;
243
244 if (cp_mqd_gfx9)
245 bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
246
247 r = amdgpu_bo_create(adev, &bp, &bo);
248 if (r) {
249 dev_err(adev->dev,
250 "failed to allocate BO for amdkfd (%d)\n", r);
251 return r;
252 }
253
254 /* map the buffer */
255 r = amdgpu_bo_reserve(bo, true);
256 if (r) {
257 dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
258 goto allocate_mem_reserve_bo_failed;
259 }
260
261 r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
262 if (r) {
263 dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
264 goto allocate_mem_pin_bo_failed;
265 }
266
267 r = amdgpu_ttm_alloc_gart(&bo->tbo);
268 if (r) {
269 dev_err(adev->dev, "%p bind failed\n", bo);
270 goto allocate_mem_kmap_bo_failed;
271 }
272
273 r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
274 if (r) {
275 dev_err(adev->dev,
276 "(%d) failed to map bo to kernel for amdkfd\n", r);
277 goto allocate_mem_kmap_bo_failed;
278 }
279
280 *mem_obj = bo;
281 *gpu_addr = amdgpu_bo_gpu_offset(bo);
282 *cpu_ptr = cpu_ptr_tmp;
283
284 amdgpu_bo_unreserve(bo);
285
286 return 0;
287
288allocate_mem_kmap_bo_failed:
289 amdgpu_bo_unpin(bo);
290allocate_mem_pin_bo_failed:
291 amdgpu_bo_unreserve(bo);
292allocate_mem_reserve_bo_failed:
293 amdgpu_bo_unref(&bo);
294
295 return r;
296}
297
298void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
299{
300 struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
301
302 amdgpu_bo_reserve(bo, true);
303 amdgpu_bo_kunmap(bo);
304 amdgpu_bo_unpin(bo);
305 amdgpu_bo_unreserve(bo);
306 amdgpu_bo_unref(&(bo));
307}
308
309int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
310 void **mem_obj)
311{
312 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
313 struct amdgpu_bo *bo = NULL;
314 struct amdgpu_bo_param bp;
315 int r;
316
317 memset(&bp, 0, sizeof(bp));
318 bp.size = size;
319 bp.byte_align = 1;
320 bp.domain = AMDGPU_GEM_DOMAIN_GWS;
321 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
322 bp.type = ttm_bo_type_device;
323 bp.resv = NULL;
324
325 r = amdgpu_bo_create(adev, &bp, &bo);
326 if (r) {
327 dev_err(adev->dev,
328 "failed to allocate gws BO for amdkfd (%d)\n", r);
329 return r;
330 }
331
332 *mem_obj = bo;
333 return 0;
334}
335
336void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
337{
338 struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
339
340 amdgpu_bo_unref(&bo);
341}
342
343uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
344 enum kgd_engine_type type)
345{
346 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
347
348 switch (type) {
349 case KGD_ENGINE_PFP:
350 return adev->gfx.pfp_fw_version;
351
352 case KGD_ENGINE_ME:
353 return adev->gfx.me_fw_version;
354
355 case KGD_ENGINE_CE:
356 return adev->gfx.ce_fw_version;
357
358 case KGD_ENGINE_MEC1:
359 return adev->gfx.mec_fw_version;
360
361 case KGD_ENGINE_MEC2:
362 return adev->gfx.mec2_fw_version;
363
364 case KGD_ENGINE_RLC:
365 return adev->gfx.rlc_fw_version;
366
367 case KGD_ENGINE_SDMA1:
368 return adev->sdma.instance[0].fw_version;
369
370 case KGD_ENGINE_SDMA2:
371 return adev->sdma.instance[1].fw_version;
372
373 default:
374 return 0;
375 }
376
377 return 0;
378}
379
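/*
 * Report VRAM sizes to KFD: VRAM that the CPU can reach through the BAR
 * (i.e. the aperture fits inside the device's DMA mask) is counted as
 * "public", the remainder as GPU-only "private" memory.
 */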
380void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
381 struct kfd_local_mem_info *mem_info)
382{
383 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
384 uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
385 ~((1ULL << 32) - 1);
386 resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
387
388 memset(mem_info, 0, sizeof(*mem_info));
389 if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
390 mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
391 mem_info->local_mem_size_private = adev->gmc.real_vram_size -
392 adev->gmc.visible_vram_size;
393 } else {
394 mem_info->local_mem_size_public = 0;
395 mem_info->local_mem_size_private = adev->gmc.real_vram_size;
396 }
397 mem_info->vram_width = adev->gmc.vram_width;
398
399 pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
400 &adev->gmc.aper_base, &aper_limit,
401 mem_info->local_mem_size_public,
402 mem_info->local_mem_size_private);
403
404 if (amdgpu_sriov_vf(adev))
405 mem_info->mem_clk_max = adev->clock.default_mclk / 100;
406 else if (adev->pm.dpm_enabled) {
407 if (amdgpu_emu_mode == 1)
408 mem_info->mem_clk_max = 0;
409 else
410 mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
411 } else
412 mem_info->mem_clk_max = 100;
413}
414
415uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
416{
417 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
418
419 if (adev->gfx.funcs->get_gpu_clock_counter)
420 return adev->gfx.funcs->get_gpu_clock_counter(adev);
421 return 0;
422}
423
424uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
425{
426 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
427
428	/* the sclk is in quanta of 10 kHz */
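	/* e.g. a reported sclk of 150000 (10 kHz units) / 100 = 1500 MHz */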
429 if (amdgpu_sriov_vf(adev))
430 return adev->clock.default_sclk / 100;
431 else if (adev->pm.dpm_enabled)
432 return amdgpu_dpm_get_sclk(adev, false) / 100;
433 else
434 return 100;
435}
436
437void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
438{
439 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
440 struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
441
442 memset(cu_info, 0, sizeof(*cu_info));
443 if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
444 return;
445
446 cu_info->cu_active_number = acu_info.number;
447 cu_info->cu_ao_mask = acu_info.ao_cu_mask;
448 memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
449 sizeof(acu_info.bitmap));
450 cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
451 cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
452 cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
453 cu_info->simd_per_cu = acu_info.simd_per_cu;
454 cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
455 cu_info->wave_front_size = acu_info.wave_front_size;
456 cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
457 cu_info->lds_size = acu_info.lds_size;
458}
459
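/*
 * Resolve a dma-buf fd for KFD import: only buffers exported by this
 * driver and backed by VRAM or GTT BOs are accepted; size, metadata and
 * KFD allocation flags of the underlying BO are reported back.
 */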
460int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
461 struct kgd_dev **dma_buf_kgd,
462 uint64_t *bo_size, void *metadata_buffer,
463 size_t buffer_size, uint32_t *metadata_size,
464 uint32_t *flags)
465{
466 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
467 struct dma_buf *dma_buf;
468 struct drm_gem_object *obj;
469 struct amdgpu_bo *bo;
470 uint64_t metadata_flags;
471 int r = -EINVAL;
472
473 dma_buf = dma_buf_get(dma_buf_fd);
474 if (IS_ERR(dma_buf))
475 return PTR_ERR(dma_buf);
476
477 if (dma_buf->ops != &amdgpu_dmabuf_ops)
478 /* Can't handle non-graphics buffers */
479 goto out_put;
480
481 obj = dma_buf->priv;
482 if (obj->dev->driver != adev->ddev->driver)
483 /* Can't handle buffers from different drivers */
484 goto out_put;
485
486 adev = obj->dev->dev_private;
487 bo = gem_to_amdgpu_bo(obj);
488 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
489 AMDGPU_GEM_DOMAIN_GTT)))
490 /* Only VRAM and GTT BOs are supported */
491 goto out_put;
492
493 r = 0;
494 if (dma_buf_kgd)
495 *dma_buf_kgd = (struct kgd_dev *)adev;
496 if (bo_size)
497 *bo_size = amdgpu_bo_size(bo);
498 if (metadata_size)
499 *metadata_size = bo->metadata_size;
500 if (metadata_buffer)
501 r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
502 metadata_size, &metadata_flags);
503 if (flags) {
504 *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
505 KFD_IOC_ALLOC_MEM_FLAGS_VRAM
506 : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
507
508 if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
509 *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
510 }
511
512out_put:
513 dma_buf_put(dma_buf);
514 return r;
515}
516
517uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
518{
519 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
520
521 return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
522}
523
524uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
525{
526 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
527
528 return adev->gmc.xgmi.hive_id;
529}
530
531uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
532{
533 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
534
535 return adev->unique_id;
536}
537
538uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
539{
540 struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
541 struct amdgpu_device *adev = (struct amdgpu_device *)dst;
542 int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
543
544 if (ret < 0) {
545 DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
546 adev->gmc.xgmi.physical_node_id,
547 peer_adev->gmc.xgmi.physical_node_id, ret);
548 ret = 0;
549 }
550 return (uint8_t)ret;
551}
552
553uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
554{
555 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
556
557 return adev->rmmio_remap.bus_addr;
558}
559
560uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
561{
562 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
563
564 return adev->gds.gws_size;
565}
566
567uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
568{
569 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
570
571 return adev->rev_id;
572}
573
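/*
 * Directly schedule an IB on a compute or SDMA ring on behalf of KFD and
 * wait for the resulting fence.  Only usable when KFD owns the VMID (the
 * no-HWS case noted in the comment below).
 */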
574int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
575 uint32_t vmid, uint64_t gpu_addr,
576 uint32_t *ib_cmd, uint32_t ib_len)
577{
578 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
579 struct amdgpu_job *job;
580 struct amdgpu_ib *ib;
581 struct amdgpu_ring *ring;
582 struct dma_fence *f = NULL;
583 int ret;
584
585 switch (engine) {
586 case KGD_ENGINE_MEC1:
587 ring = &adev->gfx.compute_ring[0];
588 break;
589 case KGD_ENGINE_SDMA1:
590 ring = &adev->sdma.instance[0].ring;
591 break;
592 case KGD_ENGINE_SDMA2:
593 ring = &adev->sdma.instance[1].ring;
594 break;
595 default:
596 pr_err("Invalid engine in IB submission: %d\n", engine);
597 ret = -EINVAL;
598 goto err;
599 }
600
601 ret = amdgpu_job_alloc(adev, 1, &job, NULL);
602 if (ret)
603 goto err;
604
605 ib = &job->ibs[0];
606 memset(ib, 0, sizeof(struct amdgpu_ib));
607
608 ib->gpu_addr = gpu_addr;
609 ib->ptr = ib_cmd;
610 ib->length_dw = ib_len;
611 /* This works for NO_HWS. TODO: need to handle without knowing VMID */
612 job->vmid = vmid;
613
614 ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
615 if (ret) {
616 DRM_ERROR("amdgpu: failed to schedule IB.\n");
617 goto err_ib_sched;
618 }
619
620 ret = dma_fence_wait(f, false);
621
622err_ib_sched:
623 dma_fence_put(f);
624 amdgpu_job_free(job);
625err:
626 return ret;
627}
628
629void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
630{
631 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
632
633 amdgpu_dpm_switch_power_profile(adev,
634 PP_SMC_POWER_PROFILE_COMPUTE,
635 !idle);
636}
637
638bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
639{
640 if (adev->kfd.dev)
641 return vmid >= adev->vm_manager.first_kfd_vmid;
642
643 return false;
644}
645
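/*
 * Flush the GPU TLB for a VMID.  On AMDGPU_FAMILY_AI (Vega/MI) parts every
 * VM hub is flushed; other ASICs only flush the GFX hub here.
 */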
646int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
647{
648 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
649
650 if (adev->family == AMDGPU_FAMILY_AI) {
651 int i;
652
653 for (i = 0; i < adev->num_vmhubs; i++)
654 amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
655 } else {
656 amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
657 }
658
659 return 0;
660}
661
662int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
663{
664 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
665 const uint32_t flush_type = 0;
666 bool all_hub = false;
667
668 if (adev->family == AMDGPU_FAMILY_AI)
669 all_hub = true;
670
671 return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
672}
673
674bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
675{
676 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
677
678 return adev->have_atomics_support;
679}
680
681#ifndef CONFIG_HSA_AMD
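/*
 * Build-time stubs: when CONFIG_HSA_AMD is disabled these empty
 * implementations let the rest of amdgpu call the kgd2kfd interface
 * unconditionally.
 */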
682bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
683{
684 return false;
685}
686
687void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
688{
689}
690
691int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
692{
693 return 0;
694}
695
696void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
697 struct amdgpu_vm *vm)
698{
699}
700
701struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
702{
703 return NULL;
704}
705
706int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
707{
708 return 0;
709}
710
711struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
712 unsigned int asic_type, bool vf)
713{
714 return NULL;
715}
716
717bool kgd2kfd_device_init(struct kfd_dev *kfd,
718 struct drm_device *ddev,
719 const struct kgd2kfd_shared_resources *gpu_resources)
720{
721 return false;
722}
723
724void kgd2kfd_device_exit(struct kfd_dev *kfd)
725{
726}
727
728void kgd2kfd_exit(void)
729{
730}
731
732void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
733{
734}
735
736int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
737{
738 return 0;
739}
740
741int kgd2kfd_pre_reset(struct kfd_dev *kfd)
742{
743 return 0;
744}
745
746int kgd2kfd_post_reset(struct kfd_dev *kfd)
747{
748 return 0;
749}
750
751void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
752{
753}
754
755void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
756{
757}
758#endif
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright 2014 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include "amdgpu_amdkfd.h"
25#include "amd_pcie.h"
26#include "amd_shared.h"
27
28#include "amdgpu.h"
29#include "amdgpu_gfx.h"
30#include "amdgpu_dma_buf.h"
31#include <drm/ttm/ttm_tt.h>
32#include <linux/module.h>
33#include <linux/dma-buf.h>
34#include "amdgpu_xgmi.h"
35#include <uapi/linux/kfd_ioctl.h>
36#include "amdgpu_ras.h"
37#include "amdgpu_umc.h"
38#include "amdgpu_reset.h"
39
40/* Total memory size in system memory and all GPU VRAM. Used to
41 * estimate worst case amount of memory to reserve for page tables
42 */
43uint64_t amdgpu_amdkfd_total_mem_size;
44
45static bool kfd_initialized;
46
47int amdgpu_amdkfd_init(void)
48{
49 struct sysinfo si;
50 int ret;
51
52 si_meminfo(&si);
53 amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
54 amdgpu_amdkfd_total_mem_size *= si.mem_unit;
55
56 ret = kgd2kfd_init();
57 kfd_initialized = !ret;
58
59 return ret;
60}
61
62void amdgpu_amdkfd_fini(void)
63{
64 if (kfd_initialized) {
65 kgd2kfd_exit();
66 kfd_initialized = false;
67 }
68}
69
70void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
71{
72 bool vf = amdgpu_sriov_vf(adev);
73
74 if (!kfd_initialized)
75 return;
76
77 adev->kfd.dev = kgd2kfd_probe(adev, vf);
78}
79
80/**
81 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
82 * setup amdkfd
83 *
84 * @adev: amdgpu_device pointer
85 * @aperture_base: output returning doorbell aperture base physical address
86 * @aperture_size: output returning doorbell aperture size in bytes
87 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
88 *
89 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
90 * takes doorbells required for its own rings and reports the setup to amdkfd.
91 * amdgpu reserved doorbells are at the start of the doorbell aperture.
92 */
93static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
94 phys_addr_t *aperture_base,
95 size_t *aperture_size,
96 size_t *start_offset)
97{
98 /*
99 * The first num_kernel_doorbells are used by amdgpu.
100 * amdkfd takes whatever's left in the aperture.
101 */
102 if (adev->enable_mes) {
103 /*
104 * With MES enabled, we only need to initialize
105 * the base address. The size and offset are
106 * not initialized as AMDGPU manages the whole
107 * doorbell space.
108 */
109 *aperture_base = adev->doorbell.base;
110 *aperture_size = 0;
111 *start_offset = 0;
112 } else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
113 sizeof(u32)) {
114 *aperture_base = adev->doorbell.base;
115 *aperture_size = adev->doorbell.size;
116 *start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
117 } else {
118 *aperture_base = 0;
119 *aperture_size = 0;
120 *start_offset = 0;
121 }
122}
123
124
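/*
 * Deferred GPU reset requested on behalf of KFD (scheduled from
 * amdgpu_amdkfd_gpu_reset() onto the reset domain work queue).
 */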
125static void amdgpu_amdkfd_reset_work(struct work_struct *work)
126{
127 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
128 kfd.reset_work);
129
130 struct amdgpu_reset_context reset_context;
131
132 memset(&reset_context, 0, sizeof(reset_context));
133
134 reset_context.method = AMD_RESET_METHOD_NONE;
135 reset_context.reset_req_dev = adev;
136 reset_context.src = adev->enable_mes ?
137 AMDGPU_RESET_SRC_MES :
138 AMDGPU_RESET_SRC_HWS;
139 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
140
141 amdgpu_device_gpu_recover(adev, NULL, &reset_context);
142}
143
144static const struct drm_client_funcs kfd_client_funcs = {
145 .unregister = drm_client_release,
146};
147
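/*
 * Register the in-kernel DRM client that KFD uses (e.g. for dma-buf
 * handling on behalf of user processes).  Bails out early if KFD did not
 * initialize or the client already exists.
 */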
148int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
149{
150 int ret;
151
152 if (!adev->kfd.init_complete || adev->kfd.client.dev)
153 return 0;
154
155 ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
156 &kfd_client_funcs);
157 if (ret) {
158 dev_err(adev->dev, "Failed to init DRM client: %d\n",
159 ret);
160 return ret;
161 }
162
163 drm_client_register(&adev->kfd.client);
164
165 return 0;
166}
167
168void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
169{
170 int i;
171 int last_valid_bit;
172
173 amdgpu_amdkfd_gpuvm_init_mem_limits();
174
175 if (adev->kfd.dev) {
176 struct kgd2kfd_shared_resources gpu_resources = {
177 .compute_vmid_bitmap =
178 ((1 << AMDGPU_NUM_VMID) - 1) -
179 ((1 << adev->vm_manager.first_kfd_vmid) - 1),
180 .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
181 .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
182 .gpuvm_size = min(adev->vm_manager.max_pfn
183 << AMDGPU_GPU_PAGE_SHIFT,
184 AMDGPU_GMC_HOLE_START),
185 .drm_render_minor = adev_to_drm(adev)->render->index,
186 .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
187 .enable_mes = adev->enable_mes,
188 };
189
190 /* this is going to have a few of the MSBs set that we need to
191 * clear
192 */
193 bitmap_complement(gpu_resources.cp_queue_bitmap,
194 adev->gfx.mec_bitmap[0].queue_bitmap,
195 AMDGPU_MAX_QUEUES);
196
197 /* According to linux/bitmap.h we shouldn't use bitmap_clear if
198 * nbits is not compile time constant
199 */
200 last_valid_bit = 1 /* only first MEC can have compute queues */
201 * adev->gfx.mec.num_pipe_per_mec
202 * adev->gfx.mec.num_queue_per_pipe;
203 for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
204 clear_bit(i, gpu_resources.cp_queue_bitmap);
205
206 amdgpu_doorbell_get_kfd_info(adev,
207 &gpu_resources.doorbell_physical_address,
208 &gpu_resources.doorbell_aperture_size,
209 &gpu_resources.doorbell_start_offset);
210
211 /* Since SOC15, BIF starts to statically use the
212 * lower 12 bits of doorbell addresses for routing
213 * based on settings in registers like
214		 * SDMA0_DOORBELL_RANGE etc.
215		 * In order to route a doorbell to the CP engine, the lower
216		 * 12 bits of its address have to be outside the range
217 * set for SDMA, VCN, and IH blocks.
218 */
219 if (adev->asic_type >= CHIP_VEGA10) {
220 gpu_resources.non_cp_doorbells_start =
221 adev->doorbell_index.first_non_cp;
222 gpu_resources.non_cp_doorbells_end =
223 adev->doorbell_index.last_non_cp;
224 }
225
226 adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
227 &gpu_resources);
228
229 amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
230
231 INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
232 }
233}
234
235void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
236{
237 if (adev->kfd.dev) {
238 kgd2kfd_device_exit(adev->kfd.dev);
239 adev->kfd.dev = NULL;
240 amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
241 }
242}
243
244void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
245 const void *ih_ring_entry)
246{
247 if (adev->kfd.dev)
248 kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
249}
250
251void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
252{
253 if (adev->kfd.dev)
254 kgd2kfd_suspend(adev->kfd.dev, run_pm);
255}
256
257int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
258{
259 int r = 0;
260
261 if (adev->kfd.dev)
262 r = kgd2kfd_resume(adev->kfd.dev, run_pm);
263
264 return r;
265}
266
267int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev,
268 struct amdgpu_reset_context *reset_context)
269{
270 int r = 0;
271
272 if (adev->kfd.dev)
273 r = kgd2kfd_pre_reset(adev->kfd.dev, reset_context);
274
275 return r;
276}
277
278int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
279{
280 int r = 0;
281
282 if (adev->kfd.dev)
283 r = kgd2kfd_post_reset(adev->kfd.dev);
284
285 return r;
286}
287
288void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
289{
290 if (amdgpu_device_should_recover_gpu(adev))
291 amdgpu_reset_domain_schedule(adev->reset_domain,
292 &adev->kfd.reset_work);
293}
294
295int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
296 void **mem_obj, uint64_t *gpu_addr,
297 void **cpu_ptr, bool cp_mqd_gfx9)
298{
299 struct amdgpu_bo *bo = NULL;
300 struct amdgpu_bo_param bp;
301 int r;
302 void *cpu_ptr_tmp = NULL;
303
304 memset(&bp, 0, sizeof(bp));
305 bp.size = size;
306 bp.byte_align = PAGE_SIZE;
307 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
308 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
309 bp.type = ttm_bo_type_kernel;
310 bp.resv = NULL;
311 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
312
313 if (cp_mqd_gfx9)
314 bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
315
316 r = amdgpu_bo_create(adev, &bp, &bo);
317 if (r) {
318 dev_err(adev->dev,
319 "failed to allocate BO for amdkfd (%d)\n", r);
320 return r;
321 }
322
323 /* map the buffer */
324 r = amdgpu_bo_reserve(bo, true);
325 if (r) {
326 dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
327 goto allocate_mem_reserve_bo_failed;
328 }
329
330 r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
331 if (r) {
332 dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
333 goto allocate_mem_pin_bo_failed;
334 }
335
336 r = amdgpu_ttm_alloc_gart(&bo->tbo);
337 if (r) {
338 dev_err(adev->dev, "%p bind failed\n", bo);
339 goto allocate_mem_kmap_bo_failed;
340 }
341
342 r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
343 if (r) {
344 dev_err(adev->dev,
345 "(%d) failed to map bo to kernel for amdkfd\n", r);
346 goto allocate_mem_kmap_bo_failed;
347 }
348
349 *mem_obj = bo;
350 *gpu_addr = amdgpu_bo_gpu_offset(bo);
351 *cpu_ptr = cpu_ptr_tmp;
352
353 amdgpu_bo_unreserve(bo);
354
355 return 0;
356
357allocate_mem_kmap_bo_failed:
358 amdgpu_bo_unpin(bo);
359allocate_mem_pin_bo_failed:
360 amdgpu_bo_unreserve(bo);
361allocate_mem_reserve_bo_failed:
362 amdgpu_bo_unref(&bo);
363
364 return r;
365}
366
367void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
368{
369 struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
370
371 amdgpu_bo_reserve(*bo, true);
372 amdgpu_bo_kunmap(*bo);
373 amdgpu_bo_unpin(*bo);
374 amdgpu_bo_unreserve(*bo);
375 amdgpu_bo_unref(bo);
376}
377
378int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
379 void **mem_obj)
380{
381 struct amdgpu_bo *bo = NULL;
382 struct amdgpu_bo_user *ubo;
383 struct amdgpu_bo_param bp;
384 int r;
385
386 memset(&bp, 0, sizeof(bp));
387 bp.size = size;
388 bp.byte_align = 1;
389 bp.domain = AMDGPU_GEM_DOMAIN_GWS;
390 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
391 bp.type = ttm_bo_type_device;
392 bp.resv = NULL;
393 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
394
395 r = amdgpu_bo_create_user(adev, &bp, &ubo);
396 if (r) {
397 dev_err(adev->dev,
398 "failed to allocate gws BO for amdkfd (%d)\n", r);
399 return r;
400 }
401
402 bo = &ubo->bo;
403 *mem_obj = bo;
404 return 0;
405}
406
407void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
408{
409 struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
410
411 amdgpu_bo_unref(&bo);
412}
413
414uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
415 enum kgd_engine_type type)
416{
417 switch (type) {
418 case KGD_ENGINE_PFP:
419 return adev->gfx.pfp_fw_version;
420
421 case KGD_ENGINE_ME:
422 return adev->gfx.me_fw_version;
423
424 case KGD_ENGINE_CE:
425 return adev->gfx.ce_fw_version;
426
427 case KGD_ENGINE_MEC1:
428 return adev->gfx.mec_fw_version;
429
430 case KGD_ENGINE_MEC2:
431 return adev->gfx.mec2_fw_version;
432
433 case KGD_ENGINE_RLC:
434 return adev->gfx.rlc_fw_version;
435
436 case KGD_ENGINE_SDMA1:
437 return adev->sdma.instance[0].fw_version;
438
439 case KGD_ENGINE_SDMA2:
440 return adev->sdma.instance[1].fw_version;
441
442 default:
443 return 0;
444 }
445
446 return 0;
447}
448
449void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
450 struct kfd_local_mem_info *mem_info,
451 struct amdgpu_xcp *xcp)
452{
453 memset(mem_info, 0, sizeof(*mem_info));
454
455 if (xcp) {
456 if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
457 mem_info->local_mem_size_public =
458 KFD_XCP_MEMORY_SIZE(adev, xcp->id);
459 else
460 mem_info->local_mem_size_private =
461 KFD_XCP_MEMORY_SIZE(adev, xcp->id);
462 } else if (adev->flags & AMD_IS_APU) {
463 mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
464 mem_info->local_mem_size_private = 0;
465 } else {
466 mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
467 mem_info->local_mem_size_private = adev->gmc.real_vram_size -
468 adev->gmc.visible_vram_size;
469 }
470 mem_info->vram_width = adev->gmc.vram_width;
471
472 pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
473 &adev->gmc.aper_base,
474 mem_info->local_mem_size_public,
475 mem_info->local_mem_size_private);
476
477 if (adev->pm.dpm_enabled) {
478 if (amdgpu_emu_mode == 1)
479 mem_info->mem_clk_max = 0;
480 else
481 mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
482 } else
483 mem_info->mem_clk_max = 100;
484}
485
486uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
487{
488 if (adev->gfx.funcs->get_gpu_clock_counter)
489 return adev->gfx.funcs->get_gpu_clock_counter(adev);
490 return 0;
491}
492
493uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
494{
495	/* the sclk is in quanta of 10 kHz */
496 if (adev->pm.dpm_enabled)
497 return amdgpu_dpm_get_sclk(adev, false) / 100;
498 else
499 return 100;
500}
501
502int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
503 struct amdgpu_device **dmabuf_adev,
504 uint64_t *bo_size, void *metadata_buffer,
505 size_t buffer_size, uint32_t *metadata_size,
506 uint32_t *flags, int8_t *xcp_id)
507{
508 struct dma_buf *dma_buf;
509 struct drm_gem_object *obj;
510 struct amdgpu_bo *bo;
511 uint64_t metadata_flags;
512 int r = -EINVAL;
513
514 dma_buf = dma_buf_get(dma_buf_fd);
515 if (IS_ERR(dma_buf))
516 return PTR_ERR(dma_buf);
517
518 if (dma_buf->ops != &amdgpu_dmabuf_ops)
519 /* Can't handle non-graphics buffers */
520 goto out_put;
521
522 obj = dma_buf->priv;
523 if (obj->dev->driver != adev_to_drm(adev)->driver)
524 /* Can't handle buffers from different drivers */
525 goto out_put;
526
527 adev = drm_to_adev(obj->dev);
528 bo = gem_to_amdgpu_bo(obj);
529 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
530 AMDGPU_GEM_DOMAIN_GTT)))
531 /* Only VRAM and GTT BOs are supported */
532 goto out_put;
533
534 r = 0;
535 if (dmabuf_adev)
536 *dmabuf_adev = adev;
537 if (bo_size)
538 *bo_size = amdgpu_bo_size(bo);
539 if (metadata_buffer)
540 r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
541 metadata_size, &metadata_flags);
542 if (flags) {
543 *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
544 KFD_IOC_ALLOC_MEM_FLAGS_VRAM
545 : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
546
547 if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
548 *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
549 }
550 if (xcp_id)
551 *xcp_id = bo->xcp_id;
552
553out_put:
554 dma_buf_put(dma_buf);
555 return r;
556}
557
558uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
559 struct amdgpu_device *src)
560{
561 struct amdgpu_device *peer_adev = src;
562 struct amdgpu_device *adev = dst;
563 int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
564
565 if (ret < 0) {
566 DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
567 adev->gmc.xgmi.physical_node_id,
568 peer_adev->gmc.xgmi.physical_node_id, ret);
569 ret = 0;
570 }
571 return (uint8_t)ret;
572}
573
574int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
575 struct amdgpu_device *src,
576 bool is_min)
577{
578 struct amdgpu_device *adev = dst, *peer_adev;
579 int num_links;
580
581 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
582 return 0;
583
584 if (src)
585 peer_adev = src;
586
587 /* num links returns 0 for indirect peers since indirect route is unknown. */
588 num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
589 if (num_links < 0) {
590 DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
591 adev->gmc.xgmi.physical_node_id,
592 peer_adev->gmc.xgmi.physical_node_id, num_links);
593 num_links = 0;
594 }
595
596 /* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
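	/* e.g. one link: 16 lanes * 25000 Mbit/s / 8 bits = 50000 Mbyte/s */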
597 return (num_links * 16 * 25000)/BITS_PER_BYTE;
598}
599
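/*
 * Estimate theoretical PCIe bandwidth in Mbyte/s: is_min picks the lowest
 * advertised lane-width/gen combination (ffs of the masks), otherwise the
 * highest (fls).  Example (assumed caps): a Gen4 x16 link gives
 * 16 * 16000 / 8 = 32000 Mbyte/s.
 */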
600int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
601{
602 int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
603 fls(adev->pm.pcie_mlw_mask)) - 1;
604 int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
605 CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
606 fls(adev->pm.pcie_gen_mask &
607 CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
608 uint32_t num_lanes_mask = 1 << num_lanes_shift;
609 uint32_t gen_speed_mask = 1 << gen_speed_shift;
610 int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
611
612 switch (num_lanes_mask) {
613 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
614 num_lanes_factor = 1;
615 break;
616 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
617 num_lanes_factor = 2;
618 break;
619 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
620 num_lanes_factor = 4;
621 break;
622 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
623 num_lanes_factor = 8;
624 break;
625 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
626 num_lanes_factor = 12;
627 break;
628 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
629 num_lanes_factor = 16;
630 break;
631 case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
632 num_lanes_factor = 32;
633 break;
634 }
635
636 switch (gen_speed_mask) {
637 case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
638 gen_speed_mbits_factor = 2500;
639 break;
640 case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
641 gen_speed_mbits_factor = 5000;
642 break;
643 case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
644 gen_speed_mbits_factor = 8000;
645 break;
646 case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
647 gen_speed_mbits_factor = 16000;
648 break;
649 case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
650 gen_speed_mbits_factor = 32000;
651 break;
652 }
653
654 return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
655}
656
657int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
658 enum kgd_engine_type engine,
659 uint32_t vmid, uint64_t gpu_addr,
660 uint32_t *ib_cmd, uint32_t ib_len)
661{
662 struct amdgpu_job *job;
663 struct amdgpu_ib *ib;
664 struct amdgpu_ring *ring;
665 struct dma_fence *f = NULL;
666 int ret;
667
668 switch (engine) {
669 case KGD_ENGINE_MEC1:
670 ring = &adev->gfx.compute_ring[0];
671 break;
672 case KGD_ENGINE_SDMA1:
673 ring = &adev->sdma.instance[0].ring;
674 break;
675 case KGD_ENGINE_SDMA2:
676 ring = &adev->sdma.instance[1].ring;
677 break;
678 default:
679 pr_err("Invalid engine in IB submission: %d\n", engine);
680 ret = -EINVAL;
681 goto err;
682 }
683
684 ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
685 if (ret)
686 goto err;
687
688 ib = &job->ibs[0];
689 memset(ib, 0, sizeof(struct amdgpu_ib));
690
691 ib->gpu_addr = gpu_addr;
692 ib->ptr = ib_cmd;
693 ib->length_dw = ib_len;
694 /* This works for NO_HWS. TODO: need to handle without knowing VMID */
695 job->vmid = vmid;
696 job->num_ibs = 1;
697
698 ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
699
700 if (ret) {
701 DRM_ERROR("amdgpu: failed to schedule IB.\n");
702 goto err_ib_sched;
703 }
704
705 /* Drop the initial kref_init count (see drm_sched_main as example) */
706 dma_fence_put(f);
707 ret = dma_fence_wait(f, false);
708
709err_ib_sched:
710 amdgpu_job_free(job);
711err:
712 return ret;
713}
714
715void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
716{
717 enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
718 if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
719 ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
720 (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
721 pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
722 amdgpu_gfx_off_ctrl(adev, idle);
723 } else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
724 (adev->flags & AMD_IS_APU)) {
725 /* Disable GFXOFF and PG. Temporary workaround
726 * to fix some compute applications issue on GFX9.
727 */
728 adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
729 }
730 amdgpu_dpm_switch_power_profile(adev,
731 PP_SMC_POWER_PROFILE_COMPUTE,
732 !idle);
733}
734
735bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
736{
737 if (adev->kfd.dev)
738 return vmid >= adev->vm_manager.first_kfd_vmid;
739
740 return false;
741}
742
743bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
744{
745 return adev->have_atomics_support;
746}
747
748void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
749{
750 amdgpu_device_flush_hdp(adev, NULL);
751}
752
753bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
754{
755 return amdgpu_ras_get_fed_status(adev);
756}
757
758void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
759 enum amdgpu_ras_block block, uint16_t pasid,
760 pasid_notify pasid_fn, void *data, uint32_t reset)
761{
762 amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
763}
764
765void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
766 enum amdgpu_ras_block block, uint32_t reset)
767{
768 amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset);
769}
770
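/*
 * Drain the IH ring, then deliver the given payload to KFD so it can fence
 * off interrupts for an event that is being closed: nothing queued before
 * this call is still in flight once the payload is handled.
 */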
771int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
772 uint32_t *payload)
773{
774 int ret;
775
776 /* Device or IH ring is not ready so bail. */
777 ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
778 if (ret)
779 return ret;
780
781 /* Send payload to fence KFD interrupts */
782 amdgpu_amdkfd_interrupt(adev, payload);
783
784 return 0;
785}
786
787int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
788{
789 return kgd2kfd_check_and_lock_kfd();
790}
791
792void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
793{
794 kgd2kfd_unlock_kfd();
795}
796
797
798u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
799{
800 s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
801 u64 tmp;
802
803 if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
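		/*
		 * Example (assumed configuration): a 128 GiB memory partition
		 * shared by 2 XCPs reports 64 GiB per XCP; an app APU in NPS1
		 * mode instead reports ttm_tt_pages_limit() (roughly half of
		 * system memory) split across the online NUMA nodes.
		 */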
804 if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
805			/* In NPS1 mode, restrict the VRAM reported to the
806			 * ttm_pages_limit, which is 1/2 of system memory. For other
807			 * partition modes the HBM is already divided uniformly across
808			 * the reported NUMA nodes. If the user wants to go beyond the
809			 * default ttm limit and maximize ROCm allocations, they can go
810			 * up to the max ttm and sysmem limits.
811 */
812
813 tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
814 } else {
815 tmp = adev->gmc.mem_partitions[mem_id].size;
816 }
817 do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
818 return ALIGN_DOWN(tmp, PAGE_SIZE);
819 } else if (adev->flags & AMD_IS_APU) {
820 return (ttm_tt_pages_limit() << PAGE_SHIFT);
821 } else {
822 return adev->gmc.real_vram_size;
823 }
824}
825
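/*
 * Unmap the HIQ at doorbell_off on GFX instance inst via the KIQ.  A
 * temporary amdgpu_ring is faked up only to carry the doorbell index into
 * the unmap_queues packet; the KIQ ring test afterwards confirms the packet
 * was processed.
 */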
826int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
827 u32 inst)
828{
829 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
830 struct amdgpu_ring *kiq_ring = &kiq->ring;
831 struct amdgpu_ring_funcs *ring_funcs;
832 struct amdgpu_ring *ring;
833 int r = 0;
834
835 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
836 return -EINVAL;
837
838 if (!kiq_ring->sched.ready || adev->job_hang)
839 return 0;
840
841 ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
842 if (!ring_funcs)
843 return -ENOMEM;
844
845 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
846 if (!ring) {
847 r = -ENOMEM;
848 goto free_ring_funcs;
849 }
850
851 ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
852 ring->doorbell_index = doorbell_off;
853 ring->funcs = ring_funcs;
854
855 spin_lock(&kiq->ring_lock);
856
857 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
858 spin_unlock(&kiq->ring_lock);
859 r = -ENOMEM;
860 goto free_ring;
861 }
862
863 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
864
865 /* Submit unmap queue packet */
866 amdgpu_ring_commit(kiq_ring);
867 /*
868 * Ring test will do a basic scratch register change check. Just run
869	 * this to ensure that the unmap-queues packet submitted above got
870 * processed successfully before returning.
871 */
872 r = amdgpu_ring_test_helper(kiq_ring);
873
874 spin_unlock(&kiq->ring_lock);
875
876free_ring:
877 kfree(ring);
878
879free_ring_funcs:
880 kfree(ring_funcs);
881
882 return r;
883}
884
885/* Stop scheduling on KFD */
886int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
887{
888 if (!adev->kfd.init_complete)
889 return 0;
890
891 return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
892}
893
894/* Start scheduling on KFD */
895int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
896{
897 if (!adev->kfd.init_complete)
898 return 0;
899
900 return kgd2kfd_start_sched(adev->kfd.dev, node_id);
901}
902
903/* check if there are KFD queues active */
904bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id)
905{
906 if (!adev->kfd.init_complete)
907 return false;
908
909 return kgd2kfd_compute_active(adev->kfd.dev, node_id);
910}
911
912/* Config CGTT_SQ_CLK_CTRL */
913int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
914 bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
915{
916 int r;
917
918 if (!adev->kfd.init_complete)
919 return 0;
920
921 r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
922 reg_override_enable, perfmon_override_enable);
923
924 return r;
925}