/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"

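/* Dequeue request codes written to mmCP_HQD_DEQUEUE_REQUEST by kgd_hqd_destroy(). */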
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

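/*
 * lock_srbm()/unlock_srbm() serialize access to the GRBM/SRBM-indexed
 * registers: nv_grbm_select() points subsequent register reads/writes at the
 * given MEC/pipe/queue/VMID, and the selection is restored to 0 on unlock.
 */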
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
		      uint32_t queue, uint32_t vmid)
{
	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct amdgpu_device *adev)
{
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

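/*
 * pipe_id is a KFD-global compute pipe index; split it into an MEC number
 * (1-based, as expected by nv_grbm_select()) and a pipe within that MEC.
 */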
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
			  uint32_t queue_id)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, queue_id, 0);
}

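/* Bit position of this queue in the per-ME queue mask written to CP_PQ_WPTR_POLL_CNTL1. */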
static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}

static void release_queue(struct amdgpu_device *adev)
{
	unlock_srbm(adev);
}

static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	lock_srbm(adev, 0, 0, 0, vmid);

	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(adev);
}

static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
					unsigned int vmid)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);

	pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

#if 0
	/* TODO: uncomment this code when the hardware support is ready. */
	while (!(RREG32(SOC15_REG_OFFSET(
			ATHUB, 0,
			mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	pr_debug("ATHUB mapping update finished\n");
	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);
#endif

	/* Mapping vmid to pasid also for IH block */
	pr_debug("update mapping for IH block and mmhub");
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	return 0;
}

/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */

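/* Enable time-stamp and opcode-error interrupts for the selected compute pipe. */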
static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, 0, 0);

	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(adev);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
		 * on SDMA1 base address (dw 0x1860) but based on SDMA0
		 * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
		 * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc
		 * below
		 */
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
	};

	uint32_t retval = sdma_engine_reg_base[engine_id]
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, retval);

	return retval;
}

#if 0
static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
{
	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
			mmTCP_WATCH0_ADDR_H;

	pr_debug("kfd: reg watch base address: 0x%x\n", retval);

	return retval;
}
#endif

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

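/*
 * Restore a user compute queue on the HQD selected by (pipe_id, queue_id):
 * write the MQD register image back to the CP, optionally arm the one-shot
 * WPTR poll, kick the EOP fetcher and mark the HQD active.
 */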
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t __user *wptr, uint32_t wptr_shift,
			uint32_t wptr_mask, struct mm_struct *mm)
{
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(adev, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
			     lower_32_bits(guessed_wptr));
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
			     upper_32_bits(guessed_wptr));
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
			     lower_32_bits((uint64_t)wptr));
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
			     upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
		WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
			     (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
		     REG_SET_FIELD(m->cp_hqd_eop_rptr,
				   CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);

	release_queue(adev);

	return 0;
}

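/*
 * Map the HIQ by submitting a MAP_QUEUES packet on the KIQ ring rather than
 * programming the HQD registers directly.
 */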
static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v10_compute_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(adev, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(adev);

	return r;
}

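/*
 * Snapshot the HQD register range into a kmalloc'ed array of
 * (register offset, value) pairs; the caller owns and frees the buffer.
 */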
static int kgd_hqd_dump(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr);	\
	} while (0)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(adev, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(adev);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

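/*
 * Restore an SDMA RLC queue from its MQD: disable the ring buffer, wait for
 * the context to go idle, reprogram doorbell/pointers/base, then re-enable.
 */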
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

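/* A compute queue is occupied if its HQD is active and the PQ base matches queue_address. */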
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(adev, pipe_id, queue_id);
	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
		    high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(adev);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

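/*
 * Preempt a compute queue: issue the requested dequeue type through
 * mmCP_HQD_DEQUEUE_REQUEST and poll CP_HQD_ACTIVE until the HQD goes idle
 * or the timeout expires.
 */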
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

	if (amdgpu_in_reset(adev))
		return -EIO;

#if 0
	unsigned long flags;
	int retry;
#endif

	acquire_queue(adev, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
		type = SAVE_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

#if 0 /* Is this still needed? */
	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();
#endif

	WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(adev);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(adev);
	return 0;
}

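/*
 * Preempt an SDMA RLC queue: disable the ring buffer, wait for the engine to
 * drain, then save the current read pointers back into the MQD.
 */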
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
				unsigned int utimeout)
{
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

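/* Read back the ATC VMID-to-PASID LUT entry; returns true if the mapping is marked valid. */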
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

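/*
 * Issue an SQ_CMD to the waves selected by gfx_index_val, then restore
 * GRBM_GFX_INDEX to broadcast so later writes reach all SEs/SAs/instances.
 */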
static int kgd_wave_control_execute(struct amdgpu_device *adev,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static void set_vm_context_page_table_base(struct amdgpu_device *adev,
		uint32_t vmid, uint64_t page_table_base)
{
	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* SDMA is on gfxhub as well for Navi1* series */
	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}

static void program_trap_handler_settings(struct amdgpu_device *adev,
		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
	lock_srbm(adev, 0, 0, 0, vmid);

	/*
	 * Program TBA registers
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
			lower_32_bits(tba_addr >> 8));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
			upper_32_bits(tba_addr >> 8) |
			(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));

	/*
	 * Program TMA registers
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
			lower_32_bits(tma_addr >> 8));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
			upper_32_bits(tma_addr >> 8));

	unlock_srbm(adev);
}

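/* KFD-to-KGD callback table exported for GFX10 (Navi1x) devices. */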
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hiq_mqd_load = kgd_hiq_mqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.wave_control_execute = kgd_wave_control_execute,
	.get_atc_vmid_pasid_mapping_info =
			get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.program_trap_handler_settings = program_trap_handler_settings,
};