/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>
#include "amdgpu_irq.h"
#include "amdgpu_gfx.h"

struct pci_dev;
struct amdgpu_device;

struct kfd_dev;
struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
	KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
};

struct kfd_vm_fault_info {
	uint64_t page_addr;
	uint32_t vmid;
	uint32_t mc_id;
	uint32_t status;
	bool prot_valid;
	bool prot_read;
	bool prot_write;
	bool prot_exec;
};

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy, also known as command
 * processor (CP) scheduling. In this mode the firmware schedules the user
 * mode queues as well as kernel queues such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration and the list
 * of currently running user mode queues to the CP.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this mode oversubscription of user mode queues is enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver
 * programs the command processor registers and sets up the queues
 * "manually". This mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};

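/*
 * Illustrative sketch (not part of this interface): hypothetical helpers
 * showing how the scheduling policy above determines whether firmware (CP)
 * scheduling and queue oversubscription are in use.
 */
static inline bool example_uses_hw_scheduler(enum kfd_sched_policy policy)
{
	/* Both HWS variants rely on CP firmware scheduling via HIQ/DIQ. */
	return policy == KFD_SCHED_POLICY_HWS ||
	       policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION;
}

static inline bool example_allows_oversubscription(enum kfd_sched_policy policy)
{
	/* Only the default HWS policy permits more user queues than HQD slots. */
	return policy == KFD_SCHED_POLICY_HWS;
}
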
struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);

	/* SDMA doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 */
	uint32_t *sdma_doorbell_idx;

	/* From SOC15 onward, the doorbell index range not usable for CP
	 * queues.
	 */
	uint32_t non_cp_doorbells_start;
	uint32_t non_cp_doorbells_end;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;

	bool enable_mes;
};

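/*
 * Illustrative sketch (assumption, not taken from amdgpu): how a KGD driver
 * might describe the VMIDs and compute queues it hands to KFD. All values
 * below are made up for illustration; the helper name is hypothetical.
 */
static inline void example_fill_shared_resources(
		struct kgd2kfd_shared_resources *res)
{
	unsigned int i;

	/* Reserve VMIDs 8..15 for compute (bit n set == VMID n is for KFD). */
	res->compute_vmid_bitmap = 0xff00;

	res->num_pipe_per_mec = 4;
	res->num_queue_per_pipe = 8;

	/* Keep the first pipe's queues for the KGD driver and give KFD the
	 * rest of the first MEC (queues 8..31 in this made-up layout).
	 */
	bitmap_zero(res->cp_queue_bitmap, AMDGPU_MAX_QUEUES);
	for (i = res->num_queue_per_pipe;
	     i < res->num_pipe_per_mec * res->num_queue_per_pipe; i++)
		set_bit(i, res->cp_queue_bitmap);
}
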
struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/**
 * struct kfd2kgd_calls
 *
 * @program_sh_mem_settings: Initializes memory properties such as the main
 * aperture memory type (cached / non-cached) and the secondary aperture
 * base address, size and memory type.
 * This function is used only in no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
 * in no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure into a H/W hqd slot. Used only in no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure into a H/W SDMA hqd slot.
 * Used only in no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree by
 * the caller.
 *
 * @hqd_is_occupied: Checks if an hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @set_scratch_backing_va: Sets the VA for scratch backing memory of a VMID.
 * Only used in no cp scheduling mode.
 *
 * @set_vm_context_page_table_base: Program the page table base for a VMID.
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID.
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @get_cu_occupancy: Returns to the caller the number of wavefronts that are
 * in flight for all of the queues of a process, as identified by its pasid.
 * The value returned is a snapshot of the current moment and does not
 * guarantee any minimum for the number of waves in flight. This function is
 * defined only for devices of the GFX9 and later GFX families; care must be
 * taken when calling it, as it is not defined for GFX8 and earlier families.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
	/* Register access functions */
	void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases,
			uint32_t inst);

	int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
			unsigned int vmid, uint32_t inst);

	int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id,
			uint32_t inst);

	int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm, uint32_t inst);

	int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off, uint32_t inst);

	int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);

	int (*hqd_sdma_dump)(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst);

	int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
			   enum kfd_preempt_type reset_type,
			   unsigned int timeout, uint32_t pipe_id,
			   uint32_t queue_id, uint32_t inst);

	bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);

	int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
				unsigned int timeout);

	int (*wave_control_execute)(struct amdgpu_device *adev,
				    uint32_t gfx_index_val,
				    uint32_t sq_cmd, uint32_t inst);
	bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
						uint8_t vmid,
						uint16_t *p_pasid);

	/* No longer needed from GFXv9 onward. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
	void (*set_scratch_backing_va)(struct amdgpu_device *adev,
				       uint64_t va, uint32_t vmid);

	void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
					       uint32_t vmid, uint64_t page_table_base);
	uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);

	uint32_t (*enable_debug_trap)(struct amdgpu_device *adev,
				      bool restore_dbg_registers,
				      uint32_t vmid);
	uint32_t (*disable_debug_trap)(struct amdgpu_device *adev,
				       bool keep_trap_enabled,
				       uint32_t vmid);
	int (*validate_trap_override_request)(struct amdgpu_device *adev,
					      uint32_t trap_override,
					      uint32_t *trap_mask_supported);
	uint32_t (*set_wave_launch_trap_override)(struct amdgpu_device *adev,
						  uint32_t vmid,
						  uint32_t trap_override,
						  uint32_t trap_mask_bits,
						  uint32_t trap_mask_request,
						  uint32_t *trap_mask_prev,
						  uint32_t kfd_dbg_trap_cntl_prev);
	uint32_t (*set_wave_launch_mode)(struct amdgpu_device *adev,
					 uint8_t wave_launch_mode,
					 uint32_t vmid);
	uint32_t (*set_address_watch)(struct amdgpu_device *adev,
				      uint64_t watch_address,
				      uint32_t watch_address_mask,
				      uint32_t watch_id,
				      uint32_t watch_mode,
				      uint32_t debug_vmid,
				      uint32_t inst);
	uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
					uint32_t watch_id);
	void (*get_iq_wait_times)(struct amdgpu_device *adev,
				  uint32_t *wait_times,
				  uint32_t inst);
	void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
					       uint32_t wait_times,
					       uint32_t grace_period,
					       uint32_t *reg_offset,
					       uint32_t *reg_data);
	void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
				 int *wave_cnt, int *max_waves_per_cu,
				 uint32_t inst);
	void (*program_trap_handler_settings)(struct amdgpu_device *adev,
					      uint32_t vmid, uint64_t tba_addr,
					      uint64_t tma_addr, uint32_t inst);
};
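
/*
 * Illustrative sketch (assumption, not part of this interface): how amdkfd
 * might use the table above to preempt a compute queue when running without
 * the HW scheduler. 'kfd2kgd' stands for the table registered by the KGD
 * driver; the helper name is hypothetical.
 */
static inline int example_preempt_queue(const struct kfd2kgd_calls *kfd2kgd,
					struct amdgpu_device *adev, void *mqd,
					unsigned int timeout, uint32_t pipe_id,
					uint32_t queue_id, uint32_t inst)
{
	/* Drain in-flight wavefronts, then wait for the HQD slot to go idle. */
	return kfd2kgd->hqd_destroy(adev, mqd,
				    KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				    timeout, pipe_id, queue_id, inst);
}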

#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

#define KFD_INTERFACE_VERSION 2
#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_dev;

struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};

struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	uint32_t cu_bitmap[4][4];
};

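/*
 * Illustrative sketch (not part of this interface): deriving the theoretical
 * maximum number of waves the reported CUs can hold in flight. The helper
 * name is hypothetical.
 */
static inline uint32_t example_max_waves_in_flight(const struct kfd_cu_info *cu_info)
{
	/* active CUs * SIMDs per CU * wave slots per SIMD */
	return cu_info->cu_active_number * cu_info->simd_per_cu *
	       cu_info->max_waves_per_simd;
}
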
/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;
};

struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};

/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2) /* TODO */
#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3) /* TODO */

/*
 * Allocation flags attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */

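/*
 * Illustrative sketch (assumption, not taken from amdkfd): composing the
 * flags above for a writable, CPU-accessible (public) VRAM allocation.
 * The macro name is hypothetical.
 */
#define EXAMPLE_VRAM_ALLOC_FLAGS	(ALLOC_MEM_FLAGS_VRAM | \
					 ALLOC_MEM_FLAGS_WRITABLE | \
					 ALLOC_MEM_FLAGS_PUBLIC)
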
/**
 * struct kfd2kgd_calls
 *
 * @init_gtt_mem_allocation: Allocates a buffer in the GART aperture.
 * The buffer can be used for MQDs, HPDs, kernel queues, fences and runlists.
 *
 * @free_gtt_mem: Frees a buffer that was allocated in the GART aperture.
 *
 * @get_local_mem_info: Retrieves information about GPU local memory.
 *
 * @get_gpu_clock_counter: Retrieves the GPU clock counter.
 *
 * @get_max_engine_clock_in_mhz: Retrieves the maximum GPU clock in MHz.
 *
 * @alloc_pasid: Allocates a PASID.
 * @free_pasid: Frees a PASID.
 *
 * @program_sh_mem_settings: Initializes memory properties such as the main
 * aperture memory type (cached / non-cached) and the secondary aperture
 * base address, size and memory type.
 * This function is used only in no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
 * in no cp scheduling mode.
 *
 * @init_pipeline: Initializes the compute pipelines.
 *
 * @hqd_load: Loads the mqd structure into a H/W hqd slot. Used only in no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure into a H/W SDMA hqd slot.
 * Used only in no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree by
 * the caller.
 *
 * @hqd_is_occupied: Checks if an hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @get_fw_version: Returns FW versions from the header.
 *
 * @set_scratch_backing_va: Sets the VA for scratch backing memory of a VMID.
 * Only used in no cp scheduling mode.
 *
 * @get_tile_config: Returns GPU-specific tiling mode information.
 *
 * @get_cu_info: Retrieves information about the active CUs.
 *
 * @get_vram_usage: Returns current VRAM usage.
 *
 * @create_process_vm: Create a VM address space for a given process and GPU.
 *
 * @destroy_process_vm: Destroy a VM.
 *
 * @get_process_page_dir: Get the physical address of a VM page directory.
 *
 * @set_vm_context_page_table_base: Program the page table base for a VMID.
 *
 * @alloc_memory_of_gpu: Allocate GPUVM memory.
 *
 * @free_memory_of_gpu: Free GPUVM memory.
 *
 * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
 * space. Allocates and updates page tables and page directories as
 * needed. This function may return before all page table updates have
 * completed. This allows multiple map operations (on multiple GPUs)
 * to happen concurrently. Use sync_memory to synchronize with all
 * pending updates.
 *
 * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space.
 *
 * @sync_memory: Wait for pending page table updates to complete.
 *
 * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access.
 * Pins the BO and maps it into the kernel address space. Such BOs are never
 * evicted. The kernel virtual address remains valid until the BO is freed.
 *
 * @restore_process_bos: Restore all BOs that belong to the
 * process. This is intended for restoring memory mappings after a TTM
 * eviction.
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID.
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID.
 *
 * @submit_ib: Submits an IB to the specified engine by inserting the
 * IB into the corresponding ring (ring type). The IB is executed with the
 * specified VMID in a user mode context.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
	int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
					void **mem_obj, uint64_t *gpu_addr,
					void **cpu_ptr);

	void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);

	void (*get_local_mem_info)(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info);
	uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);

	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);

	int (*alloc_pasid)(unsigned int bits);
	void (*free_pasid)(unsigned int pasid);

	/* Register access functions */
	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

	int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

	int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr);

	int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);

	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);

	int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);

	int (*hqd_sdma_dump)(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

	int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);

	int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);

	int (*address_watch_disable)(struct kgd_dev *kgd);
	int (*address_watch_execute)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
	int (*wave_control_execute)(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
	uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);
	bool (*get_atc_vmid_pasid_mapping_valid)(struct kgd_dev *kgd,
					uint8_t vmid);
	uint16_t (*get_atc_vmid_pasid_mapping_pasid)(struct kgd_dev *kgd,
					uint8_t vmid);

	uint16_t (*get_fw_version)(struct kgd_dev *kgd,
				enum kgd_engine_type type);
	void (*set_scratch_backing_va)(struct kgd_dev *kgd,
				uint64_t va, uint32_t vmid);
	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);

	void (*get_cu_info)(struct kgd_dev *kgd,
			struct kfd_cu_info *cu_info);
	uint64_t (*get_vram_usage)(struct kgd_dev *kgd);

	int (*create_process_vm)(struct kgd_dev *kgd, void **vm,
			void **process_info, struct dma_fence **ef);
	int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
			void **vm, void **process_info, struct dma_fence **ef);
	void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
	uint32_t (*get_process_page_dir)(void *vm);
	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
			uint32_t vmid, uint32_t page_table_base);
	int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
			uint64_t size, void *vm,
			struct kgd_mem **mem, uint64_t *offset,
			uint32_t flags);
	int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
	int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
	int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void **kptr, uint64_t *size);
	int (*restore_process_bos)(void *process_info, struct dma_fence **ef);

	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);

	int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
			uint32_t vmid, uint64_t gpu_addr,
			uint32_t *ib_cmd, uint32_t ib_len);
};
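
/*
 * Illustrative sketch (assumption, not part of this interface): mapping a
 * previously allocated BO into a process VM and waiting for the pending page
 * table updates, as described for @map_memory_to_gpu and @sync_memory above.
 * 'kfd2kgd' stands for the table provided by the KGD driver; the helper name
 * is hypothetical.
 */
static inline int example_map_and_sync(const struct kfd2kgd_calls *kfd2kgd,
				       struct kgd_dev *kgd,
				       struct kgd_mem *mem, void *vm)
{
	int r;

	/* May return before all page table updates have completed. */
	r = kfd2kgd->map_memory_to_gpu(kgd, mem, vm);
	if (r)
		return r;

	/* Wait (interruptibly) for the pending updates to finish. */
	return kfd2kgd->sync_memory(kgd, mem, true);
}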

/**
 * struct kgd2kfd_calls
 *
 * @exit: Notifies amdkfd that the kgd module is being unloaded
 *
 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
 *
 * @device_init: Initializes the newly probed device (if it is a device that
 * amdkfd supports)
 *
 * @device_exit: Notifies amdkfd about the removal of a kgd device
 *
 * @suspend: Notifies amdkfd about a suspend action done to a kgd device
 *
 * @resume: Notifies amdkfd about a resume action done to a kgd device
 *
 * @schedule_evict_and_restore_process: Schedules a work queue that will
 * prepare for safe eviction of KFD BOs that belong to the specified process.
 *
 * This structure contains function callback pointers that the kgd driver
 * uses to notify amdkfd about certain status changes.
 */
struct kgd2kfd_calls {
	void (*exit)(void);
	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
		const struct kfd2kgd_calls *f2g);
	bool (*device_init)(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
	void (*device_exit)(struct kfd_dev *kfd);
	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
	void (*suspend)(struct kfd_dev *kfd);
	int (*resume)(struct kfd_dev *kfd);
	int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
			struct dma_fence *fence);
};

int kgd2kfd_init(unsigned interface_version,
		const struct kgd2kfd_calls **g2f);

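/*
 * Illustrative sketch (assumption, not taken from the KGD drivers): the
 * version handshake a KGD driver could perform at module init to obtain the
 * kgd2kfd callback table. A non-zero return from kgd2kfd_init() is assumed
 * to indicate failure (e.g. an interface version mismatch); the helper name
 * is hypothetical.
 */
static inline const struct kgd2kfd_calls *example_attach_kfd(void)
{
	const struct kgd2kfd_calls *kgd2kfd = NULL;

	if (kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd))
		return NULL;

	return kgd2kfd;
}
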
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */