1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/module.h>
24#include <linux/fdtable.h>
25#include <linux/uaccess.h>
26#include <linux/mmu_context.h>
27
28#include "amdgpu.h"
29#include "amdgpu_amdkfd.h"
30#include "gfx_v8_0.h"
31#include "gca/gfx_8_0_sh_mask.h"
32#include "gca/gfx_8_0_d.h"
33#include "gca/gfx_8_0_enum.h"
34#include "oss/oss_3_0_sh_mask.h"
35#include "oss/oss_3_0_d.h"
36#include "gmc/gmc_8_1_sh_mask.h"
37#include "gmc/gmc_8_1_d.h"
38#include "vi_structs.h"
39#include "vid.h"
40
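/* Values written to CP_HQD_DEQUEUE_REQUEST when tearing down an HQD:
 * NO_ACTION leaves the queue alone, DRAIN_PIPE lets outstanding
 * wavefronts complete before the queue is released, and RESET_WAVES
 * kills the active wavefronts.
 */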
41enum hqd_dequeue_request_type {
42 NO_ACTION = 0,
43 DRAIN_PIPE,
44 RESET_WAVES
45};
46
47/*
48 * Register access functions
49 */
50
51static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
52 uint32_t sh_mem_config,
53 uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
54 uint32_t sh_mem_bases);
55static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
56 unsigned int vmid);
57static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
58static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
59 uint32_t queue_id, uint32_t __user *wptr,
60 uint32_t wptr_shift, uint32_t wptr_mask,
61 struct mm_struct *mm);
62static int kgd_hqd_dump(struct kgd_dev *kgd,
63 uint32_t pipe_id, uint32_t queue_id,
64 uint32_t (**dump)[2], uint32_t *n_regs);
65static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
66 uint32_t __user *wptr, struct mm_struct *mm);
67static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
68 uint32_t engine_id, uint32_t queue_id,
69 uint32_t (**dump)[2], uint32_t *n_regs);
70static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
71 uint32_t pipe_id, uint32_t queue_id);
72static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
73static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
74 enum kfd_preempt_type reset_type,
75 unsigned int utimeout, uint32_t pipe_id,
76 uint32_t queue_id);
77static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
78 unsigned int utimeout);
79static int kgd_address_watch_disable(struct kgd_dev *kgd);
80static int kgd_address_watch_execute(struct kgd_dev *kgd,
81 unsigned int watch_point_id,
82 uint32_t cntl_val,
83 uint32_t addr_hi,
84 uint32_t addr_lo);
85static int kgd_wave_control_execute(struct kgd_dev *kgd,
86 uint32_t gfx_index_val,
87 uint32_t sq_cmd);
88static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
89 unsigned int watch_point_id,
90 unsigned int reg_offset);
91
92static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
93 uint8_t vmid);
94static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
95 uint8_t vmid);
96static void set_scratch_backing_va(struct kgd_dev *kgd,
97 uint64_t va, uint32_t vmid);
98static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
99 uint64_t page_table_base);
100static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
101static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
102
103/* Because REG_GET_FIELD() is used, this function is kept in the
104 * ASIC-specific file.
105 */
106static int get_tile_config(struct kgd_dev *kgd,
107 struct tile_config *config)
108{
109 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
110
111 config->gb_addr_config = adev->gfx.config.gb_addr_config;
112 config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
113 MC_ARB_RAMCFG, NOOFBANK);
114 config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
115 MC_ARB_RAMCFG, NOOFRANKS);
116
117 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
118 config->num_tile_configs =
119 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
120 config->macro_tile_config_ptr =
121 adev->gfx.config.macrotile_mode_array;
122 config->num_macro_tile_configs =
123 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
124
125 return 0;
126}
127
128static const struct kfd2kgd_calls kfd2kgd = {
129 .program_sh_mem_settings = kgd_program_sh_mem_settings,
130 .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
131 .init_interrupts = kgd_init_interrupts,
132 .hqd_load = kgd_hqd_load,
133 .hqd_sdma_load = kgd_hqd_sdma_load,
134 .hqd_dump = kgd_hqd_dump,
135 .hqd_sdma_dump = kgd_hqd_sdma_dump,
136 .hqd_is_occupied = kgd_hqd_is_occupied,
137 .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
138 .hqd_destroy = kgd_hqd_destroy,
139 .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
140 .address_watch_disable = kgd_address_watch_disable,
141 .address_watch_execute = kgd_address_watch_execute,
142 .wave_control_execute = kgd_wave_control_execute,
143 .address_watch_get_offset = kgd_address_watch_get_offset,
144 .get_atc_vmid_pasid_mapping_pasid =
145 get_atc_vmid_pasid_mapping_pasid,
146 .get_atc_vmid_pasid_mapping_valid =
147 get_atc_vmid_pasid_mapping_valid,
148 .set_scratch_backing_va = set_scratch_backing_va,
149 .get_tile_config = get_tile_config,
150 .set_vm_context_page_table_base = set_vm_context_page_table_base,
151 .invalidate_tlbs = invalidate_tlbs,
152 .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
153};
154
155struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
156{
157 return (struct kfd2kgd_calls *)&kfd2kgd;
158}
159
160static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
161{
162 return (struct amdgpu_device *)kgd;
163}
164
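/* SRBM_GFX_CNTL selects which MEC/pipe/queue/VMID the subsequent banked
 * register accesses (SH_MEM_*, CP_HQD_*, ...) are routed to. The
 * selection is guarded by srbm_mutex and cleared again on unlock.
 */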
165static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
166 uint32_t queue, uint32_t vmid)
167{
168 struct amdgpu_device *adev = get_amdgpu_device(kgd);
169 uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
170
171 mutex_lock(&adev->srbm_mutex);
172 WREG32(mmSRBM_GFX_CNTL, value);
173}
174
175static void unlock_srbm(struct kgd_dev *kgd)
176{
177 struct amdgpu_device *adev = get_amdgpu_device(kgd);
178
179 WREG32(mmSRBM_GFX_CNTL, 0);
180 mutex_unlock(&adev->srbm_mutex);
181}
182
183static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
184 uint32_t queue_id)
185{
186 struct amdgpu_device *adev = get_amdgpu_device(kgd);
187
188 uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
189 uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
190
191 lock_srbm(kgd, mec, pipe, queue_id, 0);
192}
193
194static void release_queue(struct kgd_dev *kgd)
195{
196 unlock_srbm(kgd);
197}
198
199static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
200 uint32_t sh_mem_config,
201 uint32_t sh_mem_ape1_base,
202 uint32_t sh_mem_ape1_limit,
203 uint32_t sh_mem_bases)
204{
205 struct amdgpu_device *adev = get_amdgpu_device(kgd);
206
207 lock_srbm(kgd, 0, 0, 0, vmid);
208
209 WREG32(mmSH_MEM_CONFIG, sh_mem_config);
210 WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
211 WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
212 WREG32(mmSH_MEM_BASES, sh_mem_bases);
213
214 unlock_srbm(kgd);
215}
216
217static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
218 unsigned int vmid)
219{
220 struct amdgpu_device *adev = get_amdgpu_device(kgd);
221
222 /*
223 * We have to assume that there is no outstanding mapping.
224 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
225 * a mapping is in progress or because a mapping finished
226 * and the SW cleared it.
227 * So the protocol is to always wait & clear.
228 */
229 uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
230 ATC_VMID0_PASID_MAPPING__VALID_MASK;
231
232 WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
233
234 while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
235 cpu_relax();
236 WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
237
238 /* Map the VMID to the PASID for the IH block as well */
239 WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
240
241 return 0;
242}
243
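/* Enable the compute pipe interrupts KFD relies on: command processor
 * timestamp events and opcode errors.
 */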
244static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
245{
246 struct amdgpu_device *adev = get_amdgpu_device(kgd);
247 uint32_t mec;
248 uint32_t pipe;
249
250 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
251 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
252
253 lock_srbm(kgd, mec, pipe, 0, 0);
254
255 WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
256 CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
257
258 unlock_srbm(kgd);
259
260 return 0;
261}
262
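/* Each SDMA engine exposes a bank of per-queue RLC registers; the MQD's
 * engine and queue ids select the bank, e.g. engine 1, queue 2 yields
 * SDMA1_REGISTER_OFFSET + 2 * KFD_VI_SDMA_QUEUE_OFFSET.
 */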
263static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
264{
265 uint32_t retval;
266
267 retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
268 m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
269 pr_debug("sdma base address: 0x%x\n", retval);
270
271 return retval;
272}
273
274static inline struct vi_mqd *get_mqd(void *mqd)
275{
276 return (struct vi_mqd *)mqd;
277}
278
279static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
280{
281 return (struct vi_sdma_mqd *)mqd;
282}
283
284static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
285 uint32_t queue_id, uint32_t __user *wptr,
286 uint32_t wptr_shift, uint32_t wptr_mask,
287 struct mm_struct *mm)
288{
289 struct amdgpu_device *adev = get_amdgpu_device(kgd);
290 struct vi_mqd *m;
291 uint32_t *mqd_hqd;
292 uint32_t reg, wptr_val, data;
293 bool valid_wptr = false;
294
295 m = get_mqd(mqd);
296
297 acquire_queue(kgd, pipe_id, queue_id);
298
299 /* The HIQ is set up during driver init with VMID 0 */
300 if (m->cp_hqd_vmid == 0) {
301 uint32_t value, mec, pipe;
302
303 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
304 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
305
306 pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
307 mec, pipe, queue_id);
308 value = RREG32(mmRLC_CP_SCHEDULERS);
309 value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
310 ((mec << 5) | (pipe << 3) | queue_id | 0x80));
311 WREG32(mmRLC_CP_SCHEDULERS, value);
312 }
313
314 /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
315 mqd_hqd = &m->cp_mqd_base_addr_lo;
316
317 for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
318 WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
319
320 /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
321 * This is safe since EOP RPTR==WPTR for any inactive HQD
322 * on ASICs that do not support context-save.
323 * EOP writes/reads can start anywhere in the ring.
324 */
325 if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
326 WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
327 WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
328 WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
329 }
330
331 for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
332 WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
333
334 /* Copy userspace write pointer value to register.
335 * Activate doorbell logic to monitor subsequent changes.
336 */
337 data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
338 CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
339 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
340
341 /* read_user_wptr may take the mm->mmap_sem.
342 * Release srbm_mutex to avoid a circular lock dependency:
343 * srbm_mutex->mmap_sem->reservation_ww_class_mutex->srbm_mutex.
344 */
345 release_queue(kgd);
346 valid_wptr = read_user_wptr(mm, wptr, wptr_val);
347 acquire_queue(kgd, pipe_id, queue_id);
348 if (valid_wptr)
349 WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
350
351 data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
352 WREG32(mmCP_HQD_ACTIVE, data);
353
354 release_queue(kgd);
355
356 return 0;
357}
358
359static int kgd_hqd_dump(struct kgd_dev *kgd,
360 uint32_t pipe_id, uint32_t queue_id,
361 uint32_t (**dump)[2], uint32_t *n_regs)
362{
363 struct amdgpu_device *adev = get_amdgpu_device(kgd);
364 uint32_t i = 0, reg;
365#define HQD_N_REGS (54+4)
366#define DUMP_REG(addr) do { \
367 if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
368 break; \
369 (*dump)[i][0] = (addr) << 2; \
370 (*dump)[i++][1] = RREG32(addr); \
371 } while (0)
372
373 *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
374 if (*dump == NULL)
375 return -ENOMEM;
376
377 acquire_queue(kgd, pipe_id, queue_id);
378
379 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
380 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
381 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
382 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
383
384 for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
385 DUMP_REG(reg);
386
387 release_queue(kgd);
388
389 WARN_ON_ONCE(i != HQD_N_REGS);
390 *n_regs = i;
391
392 return 0;
393}
394
395static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
396 uint32_t __user *wptr, struct mm_struct *mm)
397{
398 struct amdgpu_device *adev = get_amdgpu_device(kgd);
399 struct vi_sdma_mqd *m;
400 unsigned long end_jiffies;
401 uint32_t sdma_base_addr;
402 uint32_t data;
403
404 m = get_sdma_mqd(mqd);
405 sdma_base_addr = get_sdma_base_addr(m);
406 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
407 m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
408
409 end_jiffies = msecs_to_jiffies(2000) + jiffies;
410 while (true) {
411 data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
412 if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
413 break;
414 if (time_after(jiffies, end_jiffies))
415 return -ETIME;
416 usleep_range(500, 1000);
417 }
418 if (m->sdma_engine_id) {
419 data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
420 data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
421 RESUME_CTX, 0);
422 WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
423 } else {
424 data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
425 data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
426 RESUME_CTX, 0);
427 WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
428 }
429
430 data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
431 ENABLE, 1);
432 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
433 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
434
435 if (read_user_wptr(mm, wptr, data))
436 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
437 else
438 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
439 m->sdmax_rlcx_rb_rptr);
440
441 WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
442 m->sdmax_rlcx_virtual_addr);
443 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
444 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
445 m->sdmax_rlcx_rb_base_hi);
446 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
447 m->sdmax_rlcx_rb_rptr_addr_lo);
448 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
449 m->sdmax_rlcx_rb_rptr_addr_hi);
450
451 data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
452 RB_ENABLE, 1);
453 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
454
455 return 0;
456}
457
458static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
459 uint32_t engine_id, uint32_t queue_id,
460 uint32_t (**dump)[2], uint32_t *n_regs)
461{
462 struct amdgpu_device *adev = get_amdgpu_device(kgd);
463 uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
464 queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
465 uint32_t i = 0, reg;
466#undef HQD_N_REGS
467#define HQD_N_REGS (19+4+2+3+7)
468
469 *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
470 if (*dump == NULL)
471 return -ENOMEM;
472
473 for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
474 DUMP_REG(sdma_offset + reg);
475 for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
476 reg++)
477 DUMP_REG(sdma_offset + reg);
478 for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
479 reg++)
480 DUMP_REG(sdma_offset + reg);
481 for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
482 reg++)
483 DUMP_REG(sdma_offset + reg);
484 for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
485 reg++)
486 DUMP_REG(sdma_offset + reg);
487
488 WARN_ON_ONCE(i != HQD_N_REGS);
489 *n_regs = i;
490
491 return 0;
492}
493
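/* CP_HQD_PQ_BASE/_HI hold the ring base address in 256-byte units,
 * hence the queue_address >> 8 before the comparison below.
 */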
494static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
495 uint32_t pipe_id, uint32_t queue_id)
496{
497 struct amdgpu_device *adev = get_amdgpu_device(kgd);
498 uint32_t act;
499 bool retval = false;
500 uint32_t low, high;
501
502 acquire_queue(kgd, pipe_id, queue_id);
503 act = RREG32(mmCP_HQD_ACTIVE);
504 if (act) {
505 low = lower_32_bits(queue_address >> 8);
506 high = upper_32_bits(queue_address >> 8);
507
508 if (low == RREG32(mmCP_HQD_PQ_BASE) &&
509 high == RREG32(mmCP_HQD_PQ_BASE_HI))
510 retval = true;
511 }
512 release_queue(kgd);
513 return retval;
514}
515
516static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
517{
518 struct amdgpu_device *adev = get_amdgpu_device(kgd);
519 struct vi_sdma_mqd *m;
520 uint32_t sdma_base_addr;
521 uint32_t sdma_rlc_rb_cntl;
522
523 m = get_sdma_mqd(mqd);
524 sdma_base_addr = get_sdma_base_addr(m);
525
526 sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
527
528 if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
529 return true;
530
531 return false;
532}
533
534static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
535 enum kfd_preempt_type reset_type,
536 unsigned int utimeout, uint32_t pipe_id,
537 uint32_t queue_id)
538{
539 struct amdgpu_device *adev = get_amdgpu_device(kgd);
540 uint32_t temp;
541 enum hqd_dequeue_request_type type;
542 unsigned long flags, end_jiffies;
543 int retry;
544 struct vi_mqd *m = get_mqd(mqd);
545
546 if (adev->in_gpu_reset)
547 return -EIO;
548
549 acquire_queue(kgd, pipe_id, queue_id);
550
551 if (m->cp_hqd_vmid == 0)
552 WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
553
554 switch (reset_type) {
555 case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
556 type = DRAIN_PIPE;
557 break;
558 case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
559 type = RESET_WAVES;
560 break;
561 default:
562 type = DRAIN_PIPE;
563 break;
564 }
565
566 /* Workaround: If the IQ timer is active and the wait time is close to
567 * or equal to 0, dequeuing is not safe. Wait until either the wait
568 * time is larger or the timer is cleared. Also ensure that IQ_REQ_PEND
569 * is cleared before continuing, and that wait times are set to at
570 * least 0x3.
571 */
572 local_irq_save(flags);
573 preempt_disable();
574 retry = 5000; /* wait for 500 usecs at maximum */
575 while (true) {
576 temp = RREG32(mmCP_HQD_IQ_TIMER);
577 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
578 pr_debug("HW is processing IQ\n");
579 goto loop;
580 }
581 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
582 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
583 == 3) /* SEM-rearm is safe */
584 break;
585 /* Wait time 3 is safe for CP, but our MMIO read/write
586 * time is close to 1 microsecond, so check for 10 to
587 * leave more buffer room
588 */
589 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
590 >= 10)
591 break;
592 pr_debug("IQ timer is active\n");
593 } else
594 break;
595loop:
596 if (!retry) {
597 pr_err("CP HQD IQ timer status time out\n");
598 break;
599 }
600 ndelay(100);
601 --retry;
602 }
603 retry = 1000;
604 while (true) {
605 temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
606 if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
607 break;
608 pr_debug("Dequeue request is pending\n");
609
610 if (!retry) {
611 pr_err("CP HQD dequeue request time out\n");
612 break;
613 }
614 ndelay(100);
615 --retry;
616 }
617 local_irq_restore(flags);
618 preempt_enable();
619
620 WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
621
622 end_jiffies = (utimeout * HZ / 1000) + jiffies;
623 while (true) {
624 temp = RREG32(mmCP_HQD_ACTIVE);
625 if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
626 break;
627 if (time_after(jiffies, end_jiffies)) {
628 pr_err("cp queue preemption time out.\n");
629 release_queue(kgd);
630 return -ETIME;
631 }
632 usleep_range(500, 1000);
633 }
634
635 release_queue(kgd);
636 return 0;
637}
638
639static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
640 unsigned int utimeout)
641{
642 struct amdgpu_device *adev = get_amdgpu_device(kgd);
643 struct vi_sdma_mqd *m;
644 uint32_t sdma_base_addr;
645 uint32_t temp;
646 unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
647
648 m = get_sdma_mqd(mqd);
649 sdma_base_addr = get_sdma_base_addr(m);
650
651 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
652 temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
653 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
654
655 while (true) {
656 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
657 if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
658 break;
659 if (time_after(jiffies, end_jiffies))
660 return -ETIME;
661 usleep_range(500, 1000);
662 }
663
664 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
665 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
666 RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
667 SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
668
669 m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
670
671 return 0;
672}
673
674static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
675 uint8_t vmid)
676{
677 uint32_t reg;
678 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
679
680 reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
681 return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
682}
683
684static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
685 uint8_t vmid)
686{
687 uint32_t reg;
688 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
689
690 reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
691 return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
692}
693
694static int kgd_address_watch_disable(struct kgd_dev *kgd)
695{
696 return 0;
697}
698
699static int kgd_address_watch_execute(struct kgd_dev *kgd,
700 unsigned int watch_point_id,
701 uint32_t cntl_val,
702 uint32_t addr_hi,
703 uint32_t addr_lo)
704{
705 return 0;
706}
707
708static int kgd_wave_control_execute(struct kgd_dev *kgd,
709 uint32_t gfx_index_val,
710 uint32_t sq_cmd)
711{
712 struct amdgpu_device *adev = get_amdgpu_device(kgd);
713 uint32_t data = 0;
714
715 mutex_lock(&adev->grbm_idx_mutex);
716
717 WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
718 WREG32(mmSQ_CMD, sq_cmd);
719
720 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
721 INSTANCE_BROADCAST_WRITES, 1);
722 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
723 SH_BROADCAST_WRITES, 1);
724 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
725 SE_BROADCAST_WRITES, 1);
726
727 WREG32(mmGRBM_GFX_INDEX, data);
728 mutex_unlock(&adev->grbm_idx_mutex);
729
730 return 0;
731}
732
733static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
734 unsigned int watch_point_id,
735 unsigned int reg_offset)
736{
737 return 0;
738}
739
740static void set_scratch_backing_va(struct kgd_dev *kgd,
741 uint64_t va, uint32_t vmid)
742{
743 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
744
745 lock_srbm(kgd, 0, 0, 0, vmid);
746 WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
747 unlock_srbm(kgd);
748}
749
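/* KFD VMIDs live in the upper half of the VMID space on this generation,
 * so their page directories are programmed through the VM_CONTEXT8..15
 * register block, hence the (vmid - 8) offset below.
 */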
750static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
751 uint64_t page_table_base)
752{
753 struct amdgpu_device *adev = get_amdgpu_device(kgd);
754
755 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
756 pr_err("trying to set page table base for wrong VMID\n");
757 return;
758 }
759 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
760 lower_32_bits(page_table_base));
761}
762
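/* Find the VMID that currently carries this PASID by scanning the ATC
 * mapping registers, then flush that VMID's TLB through
 * VM_INVALIDATE_REQUEST.
 */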
763static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
764{
765 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
766 int vmid;
767 unsigned int tmp;
768
769 if (adev->in_gpu_reset)
770 return -EIO;
771
772 for (vmid = 0; vmid < 16; vmid++) {
773 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
774 continue;
775
776 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
777 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
778 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
779 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
780 RREG32(mmVM_INVALIDATE_RESPONSE);
781 break;
782 }
783 }
784
785 return 0;
786}
787
788static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
789{
790 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
791
792 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
793 pr_err("non kfd vmid %d\n", vmid);
794 return -EINVAL;
795 }
796
797 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
798 RREG32(mmVM_INVALIDATE_RESPONSE);
799 return 0;
800}
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "amdgpu.h"
24#include "amdgpu_amdkfd.h"
25#include "gfx_v8_0.h"
26#include "gca/gfx_8_0_sh_mask.h"
27#include "gca/gfx_8_0_d.h"
28#include "gca/gfx_8_0_enum.h"
29#include "oss/oss_3_0_sh_mask.h"
30#include "oss/oss_3_0_d.h"
31#include "gmc/gmc_8_1_sh_mask.h"
32#include "gmc/gmc_8_1_d.h"
33#include "vi_structs.h"
34#include "vid.h"
35
36enum hqd_dequeue_request_type {
37 NO_ACTION = 0,
38 DRAIN_PIPE,
39 RESET_WAVES
40};
41
42static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
43{
44 return (struct amdgpu_device *)kgd;
45}
46
47static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
48 uint32_t queue, uint32_t vmid)
49{
50 struct amdgpu_device *adev = get_amdgpu_device(kgd);
51 uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
52
53 mutex_lock(&adev->srbm_mutex);
54 WREG32(mmSRBM_GFX_CNTL, value);
55}
56
57static void unlock_srbm(struct kgd_dev *kgd)
58{
59 struct amdgpu_device *adev = get_amdgpu_device(kgd);
60
61 WREG32(mmSRBM_GFX_CNTL, 0);
62 mutex_unlock(&adev->srbm_mutex);
63}
64
65static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
66 uint32_t queue_id)
67{
68 struct amdgpu_device *adev = get_amdgpu_device(kgd);
69
70 uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
71 uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
72
73 lock_srbm(kgd, mec, pipe, queue_id, 0);
74}
75
76static void release_queue(struct kgd_dev *kgd)
77{
78 unlock_srbm(kgd);
79}
80
81static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
82 uint32_t sh_mem_config,
83 uint32_t sh_mem_ape1_base,
84 uint32_t sh_mem_ape1_limit,
85 uint32_t sh_mem_bases)
86{
87 struct amdgpu_device *adev = get_amdgpu_device(kgd);
88
89 lock_srbm(kgd, 0, 0, 0, vmid);
90
91 WREG32(mmSH_MEM_CONFIG, sh_mem_config);
92 WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
93 WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
94 WREG32(mmSH_MEM_BASES, sh_mem_bases);
95
96 unlock_srbm(kgd);
97}
98
99static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
100 unsigned int vmid)
101{
102 struct amdgpu_device *adev = get_amdgpu_device(kgd);
103
104 /*
105 * We have to assume that there is no outstanding mapping.
106 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
107 * a mapping is in progress or because a mapping finished
108 * and the SW cleared it.
109 * So the protocol is to always wait & clear.
110 */
111 uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
112 ATC_VMID0_PASID_MAPPING__VALID_MASK;
113
114 WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
115
116 while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
117 cpu_relax();
118 WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
119
120 /* Map the VMID to the PASID for the IH block as well */
121 WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
122
123 return 0;
124}
125
126static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
127{
128 struct amdgpu_device *adev = get_amdgpu_device(kgd);
129 uint32_t mec;
130 uint32_t pipe;
131
132 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
133 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
134
135 lock_srbm(kgd, mec, pipe, 0, 0);
136
137 WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
138 CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
139
140 unlock_srbm(kgd);
141
142 return 0;
143}
144
145static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
146{
147 uint32_t retval;
148
149 retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
150 m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
151
152 pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
153 m->sdma_engine_id, m->sdma_queue_id, retval);
154
155 return retval;
156}
157
158static inline struct vi_mqd *get_mqd(void *mqd)
159{
160 return (struct vi_mqd *)mqd;
161}
162
163static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
164{
165 return (struct vi_sdma_mqd *)mqd;
166}
167
168static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
169 uint32_t queue_id, uint32_t __user *wptr,
170 uint32_t wptr_shift, uint32_t wptr_mask,
171 struct mm_struct *mm)
172{
173 struct amdgpu_device *adev = get_amdgpu_device(kgd);
174 struct vi_mqd *m;
175 uint32_t *mqd_hqd;
176 uint32_t reg, wptr_val, data;
177 bool valid_wptr = false;
178
179 m = get_mqd(mqd);
180
181 acquire_queue(kgd, pipe_id, queue_id);
182
183 /* The HIQ is set up during driver init with VMID 0 */
184 if (m->cp_hqd_vmid == 0) {
185 uint32_t value, mec, pipe;
186
187 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
188 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
189
190 pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
191 mec, pipe, queue_id);
192 value = RREG32(mmRLC_CP_SCHEDULERS);
193 value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
194 ((mec << 5) | (pipe << 3) | queue_id | 0x80));
195 WREG32(mmRLC_CP_SCHEDULERS, value);
196 }
197
198 /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
199 mqd_hqd = &m->cp_mqd_base_addr_lo;
200
201 for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
202 WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
203
204 /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
205 * This is safe since EOP RPTR==WPTR for any inactive HQD
206 * on ASICs that do not support context-save.
207 * EOP writes/reads can start anywhere in the ring.
208 */
209 if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
210 WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
211 WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
212 WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
213 }
214
215 for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
216 WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
217
218 /* Copy userspace write pointer value to register.
219 * Activate doorbell logic to monitor subsequent changes.
220 */
221 data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
222 CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
223 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
224
225 /* read_user_wptr may take the mm->mmap_lock.
226 * Release srbm_mutex to avoid a circular lock dependency:
227 * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
228 */
229 release_queue(kgd);
230 valid_wptr = read_user_wptr(mm, wptr, wptr_val);
231 acquire_queue(kgd, pipe_id, queue_id);
232 if (valid_wptr)
233 WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
234
235 data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
236 WREG32(mmCP_HQD_ACTIVE, data);
237
238 release_queue(kgd);
239
240 return 0;
241}
242
243static int kgd_hqd_dump(struct kgd_dev *kgd,
244 uint32_t pipe_id, uint32_t queue_id,
245 uint32_t (**dump)[2], uint32_t *n_regs)
246{
247 struct amdgpu_device *adev = get_amdgpu_device(kgd);
248 uint32_t i = 0, reg;
249#define HQD_N_REGS (54+4)
250#define DUMP_REG(addr) do { \
251 if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
252 break; \
253 (*dump)[i][0] = (addr) << 2; \
254 (*dump)[i++][1] = RREG32(addr); \
255 } while (0)
256
257 *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
258 if (*dump == NULL)
259 return -ENOMEM;
260
261 acquire_queue(kgd, pipe_id, queue_id);
262
263 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
264 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
265 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
266 DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
267
268 for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
269 DUMP_REG(reg);
270
271 release_queue(kgd);
272
273 WARN_ON_ONCE(i != HQD_N_REGS);
274 *n_regs = i;
275
276 return 0;
277}
278
279static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
280 uint32_t __user *wptr, struct mm_struct *mm)
281{
282 struct amdgpu_device *adev = get_amdgpu_device(kgd);
283 struct vi_sdma_mqd *m;
284 unsigned long end_jiffies;
285 uint32_t sdma_rlc_reg_offset;
286 uint32_t data;
287
288 m = get_sdma_mqd(mqd);
289 sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
290 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
291 m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
292
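	/* Wait up to two seconds for the RLC queue context to drain to idle
	 * before the queue is reprogrammed below.
	 */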
293 end_jiffies = msecs_to_jiffies(2000) + jiffies;
294 while (true) {
295 data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
296 if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
297 break;
298 if (time_after(jiffies, end_jiffies)) {
299 pr_err("SDMA RLC not idle in %s\n", __func__);
300 return -ETIME;
301 }
302 usleep_range(500, 1000);
303 }
304
305 data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
306 ENABLE, 1);
307 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
308 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
309 m->sdmax_rlcx_rb_rptr);
310
311 if (read_user_wptr(mm, wptr, data))
312 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
313 else
314 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
315 m->sdmax_rlcx_rb_rptr);
316
317 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
318 m->sdmax_rlcx_virtual_addr);
319 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
320 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
321 m->sdmax_rlcx_rb_base_hi);
322 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
323 m->sdmax_rlcx_rb_rptr_addr_lo);
324 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
325 m->sdmax_rlcx_rb_rptr_addr_hi);
326
327 data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
328 RB_ENABLE, 1);
329 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
330
331 return 0;
332}
333
334static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
335 uint32_t engine_id, uint32_t queue_id,
336 uint32_t (**dump)[2], uint32_t *n_regs)
337{
338 struct amdgpu_device *adev = get_amdgpu_device(kgd);
339 uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
340 queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
341 uint32_t i = 0, reg;
342#undef HQD_N_REGS
343#define HQD_N_REGS (19+4+2+3+7)
344
345 *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
346 if (*dump == NULL)
347 return -ENOMEM;
348
349 for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
350 DUMP_REG(sdma_offset + reg);
351 for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
352 reg++)
353 DUMP_REG(sdma_offset + reg);
354 for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
355 reg++)
356 DUMP_REG(sdma_offset + reg);
357 for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
358 reg++)
359 DUMP_REG(sdma_offset + reg);
360 for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
361 reg++)
362 DUMP_REG(sdma_offset + reg);
363
364 WARN_ON_ONCE(i != HQD_N_REGS);
365 *n_regs = i;
366
367 return 0;
368}
369
370static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
371 uint32_t pipe_id, uint32_t queue_id)
372{
373 struct amdgpu_device *adev = get_amdgpu_device(kgd);
374 uint32_t act;
375 bool retval = false;
376 uint32_t low, high;
377
378 acquire_queue(kgd, pipe_id, queue_id);
379 act = RREG32(mmCP_HQD_ACTIVE);
380 if (act) {
381 low = lower_32_bits(queue_address >> 8);
382 high = upper_32_bits(queue_address >> 8);
383
384 if (low == RREG32(mmCP_HQD_PQ_BASE) &&
385 high == RREG32(mmCP_HQD_PQ_BASE_HI))
386 retval = true;
387 }
388 release_queue(kgd);
389 return retval;
390}
391
392static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
393{
394 struct amdgpu_device *adev = get_amdgpu_device(kgd);
395 struct vi_sdma_mqd *m;
396 uint32_t sdma_rlc_reg_offset;
397 uint32_t sdma_rlc_rb_cntl;
398
399 m = get_sdma_mqd(mqd);
400 sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
401
402 sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
403
404 if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
405 return true;
406
407 return false;
408}
409
410static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
411 enum kfd_preempt_type reset_type,
412 unsigned int utimeout, uint32_t pipe_id,
413 uint32_t queue_id)
414{
415 struct amdgpu_device *adev = get_amdgpu_device(kgd);
416 uint32_t temp;
417 enum hqd_dequeue_request_type type;
418 unsigned long flags, end_jiffies;
419 int retry;
420 struct vi_mqd *m = get_mqd(mqd);
421
422 if (adev->in_gpu_reset)
423 return -EIO;
424
425 acquire_queue(kgd, pipe_id, queue_id);
426
427 if (m->cp_hqd_vmid == 0)
428 WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
429
430 switch (reset_type) {
431 case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
432 type = DRAIN_PIPE;
433 break;
434 case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
435 type = RESET_WAVES;
436 break;
437 default:
438 type = DRAIN_PIPE;
439 break;
440 }
441
442 /* Workaround: If the IQ timer is active and the wait time is close to
443 * or equal to 0, dequeuing is not safe. Wait until either the wait
444 * time is larger or the timer is cleared. Also ensure that IQ_REQ_PEND
445 * is cleared before continuing, and that wait times are set to at
446 * least 0x3.
447 */
448 local_irq_save(flags);
449 preempt_disable();
450 retry = 5000; /* wait for 500 usecs at maximum */
451 while (true) {
452 temp = RREG32(mmCP_HQD_IQ_TIMER);
453 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
454 pr_debug("HW is processing IQ\n");
455 goto loop;
456 }
457 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
458 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
459 == 3) /* SEM-rearm is safe */
460 break;
461 /* Wait time 3 is safe for CP, but our MMIO read/write
462 * time is close to 1 microsecond, so check for 10 to
463 * leave more buffer room
464 */
465 if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
466 >= 10)
467 break;
468 pr_debug("IQ timer is active\n");
469 } else
470 break;
471loop:
472 if (!retry) {
473 pr_err("CP HQD IQ timer status time out\n");
474 break;
475 }
476 ndelay(100);
477 --retry;
478 }
479 retry = 1000;
480 while (true) {
481 temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
482 if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
483 break;
484 pr_debug("Dequeue request is pending\n");
485
486 if (!retry) {
487 pr_err("CP HQD dequeue request time out\n");
488 break;
489 }
490 ndelay(100);
491 --retry;
492 }
493 local_irq_restore(flags);
494 preempt_enable();
495
496 WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
497
498 end_jiffies = (utimeout * HZ / 1000) + jiffies;
499 while (true) {
500 temp = RREG32(mmCP_HQD_ACTIVE);
501 if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
502 break;
503 if (time_after(jiffies, end_jiffies)) {
504 pr_err("cp queue preemption time out.\n");
505 release_queue(kgd);
506 return -ETIME;
507 }
508 usleep_range(500, 1000);
509 }
510
511 release_queue(kgd);
512 return 0;
513}
514
515static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
516 unsigned int utimeout)
517{
518 struct amdgpu_device *adev = get_amdgpu_device(kgd);
519 struct vi_sdma_mqd *m;
520 uint32_t sdma_rlc_reg_offset;
521 uint32_t temp;
522 unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
523
524 m = get_sdma_mqd(mqd);
525 sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
526
527 temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
528 temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
529 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
530
531 while (true) {
532 temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
533 if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
534 break;
535 if (time_after(jiffies, end_jiffies)) {
536 pr_err("SDMA RLC not idle in %s\n", __func__);
537 return -ETIME;
538 }
539 usleep_range(500, 1000);
540 }
541
542 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
543 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
544 RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
545 SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
546
547 m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
548
549 return 0;
550}
551
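/* Read back the ATC VMID->PASID mapping for @vmid: the PASID is returned
 * through @p_pasid and the return value reports whether the mapping is
 * currently valid.
 */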
552static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
553 uint8_t vmid, uint16_t *p_pasid)
554{
555 uint32_t value;
556 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
557
558 value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
559 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
560
561 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
562}
563
564static int kgd_address_watch_disable(struct kgd_dev *kgd)
565{
566 return 0;
567}
568
569static int kgd_address_watch_execute(struct kgd_dev *kgd,
570 unsigned int watch_point_id,
571 uint32_t cntl_val,
572 uint32_t addr_hi,
573 uint32_t addr_lo)
574{
575 return 0;
576}
577
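/* Issue an SQ_CMD to the SE/SH/instance selected by gfx_index_val, then
 * restore GRBM_GFX_INDEX to broadcast so later writes reach all shader
 * engines again.
 */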
578static int kgd_wave_control_execute(struct kgd_dev *kgd,
579 uint32_t gfx_index_val,
580 uint32_t sq_cmd)
581{
582 struct amdgpu_device *adev = get_amdgpu_device(kgd);
583 uint32_t data = 0;
584
585 mutex_lock(&adev->grbm_idx_mutex);
586
587 WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
588 WREG32(mmSQ_CMD, sq_cmd);
589
590 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
591 INSTANCE_BROADCAST_WRITES, 1);
592 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
593 SH_BROADCAST_WRITES, 1);
594 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
595 SE_BROADCAST_WRITES, 1);
596
597 WREG32(mmGRBM_GFX_INDEX, data);
598 mutex_unlock(&adev->grbm_idx_mutex);
599
600 return 0;
601}
602
603static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
604 unsigned int watch_point_id,
605 unsigned int reg_offset)
606{
607 return 0;
608}
609
610static void set_scratch_backing_va(struct kgd_dev *kgd,
611 uint64_t va, uint32_t vmid)
612{
613 struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
614
615 lock_srbm(kgd, 0, 0, 0, vmid);
616 WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
617 unlock_srbm(kgd);
618}
619
620static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
621 uint64_t page_table_base)
622{
623 struct amdgpu_device *adev = get_amdgpu_device(kgd);
624
625 if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
626 pr_err("trying to set page table base for wrong VMID\n");
627 return;
628 }
629 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
630 lower_32_bits(page_table_base));
631}
632
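/* Dispatch table handed to the KFD driver; each entry points at one of
 * the GFX8 helpers above.
 */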
633const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
634 .program_sh_mem_settings = kgd_program_sh_mem_settings,
635 .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
636 .init_interrupts = kgd_init_interrupts,
637 .hqd_load = kgd_hqd_load,
638 .hqd_sdma_load = kgd_hqd_sdma_load,
639 .hqd_dump = kgd_hqd_dump,
640 .hqd_sdma_dump = kgd_hqd_sdma_dump,
641 .hqd_is_occupied = kgd_hqd_is_occupied,
642 .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
643 .hqd_destroy = kgd_hqd_destroy,
644 .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
645 .address_watch_disable = kgd_address_watch_disable,
646 .address_watch_execute = kgd_address_watch_execute,
647 .wave_control_execute = kgd_wave_control_execute,
648 .address_watch_get_offset = kgd_address_watch_get_offset,
649 .get_atc_vmid_pasid_mapping_info =
650 get_atc_vmid_pasid_mapping_info,
651 .set_scratch_backing_va = set_scratch_backing_va,
652 .set_vm_context_page_table_base = set_vm_context_page_table_base,
653};