/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
	uint64_t vram_used_aligned;
	bool init_complete;
	struct work_struct reset_work;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
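
/*
 * Usage sketch (illustrative only; the calling context is assumed, not
 * taken from this file): after updating a process's GPU page tables, a
 * caller could invalidate that process's cached translations with
 *
 *	amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, pasid, TLB_FLUSH_LEGACY);
 *
 * where the other TLB_FLUSH_TYPE values select lighter- or
 * heavier-weight invalidation, as their names suggest.
 */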

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
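
/*
 * Usage sketch (illustrative only; error handling is elided and a zero
 * return is assumed to mean success, per the usual kernel convention):
 * allocate a page of GTT memory, use it through the returned CPU and
 * GPU addresses, then release it.
 *
 *	void *mem_obj;
 *	uint64_t gpu_addr;
 *	void *cpu_ptr;
 *
 *	if (amdgpu_amdkfd_alloc_gtt_mem(adev, PAGE_SIZE, &mem_obj,
 *					&gpu_addr, &cpu_ptr, false))
 *		return;
 *	... write through cpu_ptr; the hardware reads via gpu_addr ...
 *	amdgpu_amdkfd_free_gtt_mem(adev, mem_obj);
 */
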
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
			       struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})
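
/*
 * Usage sketch (hypothetical caller; program_hqd_wptr() is an assumed
 * helper for illustration, not part of this interface):
 *
 *	uint32_t wptr_val;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		program_hqd_wptr(adev, wptr_val);
 *
 * The macro evaluates to true only when the user-space read completed
 * without faulting; otherwise it evaluates to false.
 */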

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
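
/*
 * Sketch: drm_priv is the opaque per-process handle used throughout the
 * GPUVM API below; underneath it is a struct drm_file pointer, so the
 * process VM can be recovered with
 *
 *	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 */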

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct file *filp, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct file *filp,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				  struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
				bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag);

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify - Notify KFD when a GEM object is released
 * @bo: the buffer object being released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif
/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */