1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_3_0_offset.h"
34#include "gc/gc_10_3_0_sh_mask.h"
35#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37#include "ivsrcid/sdma2/irqsrcs_sdma2_5_0.h"
38#include "ivsrcid/sdma3/irqsrcs_sdma3_5_0.h"
39
40#include "soc15_common.h"
41#include "soc15.h"
42#include "navi10_sdma_pkt_open.h"
43#include "nbio_v2_3.h"
44#include "sdma_common.h"
45#include "sdma_v5_2.h"
46
47MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin");
48MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin");
49MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin");
50MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");
51
52MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
53MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin");
54MODULE_FIRMWARE("amdgpu/sdma_5_2_6.bin");
55MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
56
57#define SDMA1_REG_OFFSET 0x600
58#define SDMA3_REG_OFFSET 0x400
59#define SDMA0_HYP_DEC_REG_START 0x5880
60#define SDMA0_HYP_DEC_REG_END 0x5893
61#define SDMA1_HYP_DEC_REG_OFFSET 0x20
62
63static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
64static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
65static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
66static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
67
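/*
 * SDMA register offsets are computed from three GC apertures: instances 0 and 1
 * share one base (instance 1 registers sit at +SDMA1_REG_OFFSET), instances 2
 * and 3 share another (instance 3 at +SDMA3_REG_OFFSET), and the HYP_DEC
 * register range has its own base with a per-instance stride of
 * SDMA1_HYP_DEC_REG_OFFSET.
 */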
68static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
69{
70 u32 base;
71
72 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
73 internal_offset <= SDMA0_HYP_DEC_REG_END) {
74 base = adev->reg_offset[GC_HWIP][0][1];
75 if (instance != 0)
76 internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
77 } else {
78 if (instance < 2) {
79 base = adev->reg_offset[GC_HWIP][0][0];
80 if (instance == 1)
81 internal_offset += SDMA1_REG_OFFSET;
82 } else {
83 base = adev->reg_offset[GC_HWIP][0][2];
84 if (instance == 3)
85 internal_offset += SDMA3_REG_OFFSET;
86 }
87 }
88
89 return base + internal_offset;
90}
91
92static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
93{
94 unsigned ret;
95
96 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
97 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
98 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
99 amdgpu_ring_write(ring, 1);
 100 ret = ring->wptr & ring->buf_mask;/* this is the offset we need to patch later */
 101 amdgpu_ring_write(ring, 0x55aa55aa);/* insert a dummy value here and patch it later */
102
103 return ret;
104}
105
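/*
 * Patch the dummy dword emitted by init_cond_exec with the number of dwords
 * written since then (accounting for ring wrap-around); this is the count of
 * dwords the COND_EXE packet conditionally executes or skips.
 */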
106static void sdma_v5_2_ring_patch_cond_exec(struct amdgpu_ring *ring,
107 unsigned offset)
108{
109 unsigned cur;
110
111 BUG_ON(offset > ring->buf_mask);
112 BUG_ON(ring->ring[offset] != 0x55aa55aa);
113
114 cur = (ring->wptr - 1) & ring->buf_mask;
115 if (cur > offset)
116 ring->ring[offset] = cur - offset;
117 else
118 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
119}
120
121/**
122 * sdma_v5_2_ring_get_rptr - get the current read pointer
123 *
124 * @ring: amdgpu ring pointer
125 *
126 * Get the current rptr from the hardware (NAVI10+).
127 */
128static uint64_t sdma_v5_2_ring_get_rptr(struct amdgpu_ring *ring)
129{
130 u64 *rptr;
131
132 /* XXX check if swapping is necessary on BE */
133 rptr = (u64 *)ring->rptr_cpu_addr;
134
135 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
136 return ((*rptr) >> 2);
137}
138
139/**
140 * sdma_v5_2_ring_get_wptr - get the current write pointer
141 *
142 * @ring: amdgpu ring pointer
143 *
144 * Get the current wptr from the hardware (NAVI10+).
145 */
146static uint64_t sdma_v5_2_ring_get_wptr(struct amdgpu_ring *ring)
147{
148 struct amdgpu_device *adev = ring->adev;
149 u64 wptr;
150
151 if (ring->use_doorbell) {
152 /* XXX check if swapping is necessary on BE */
153 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
154 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
155 } else {
156 wptr = RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
157 wptr = wptr << 32;
158 wptr |= RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
159 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
160 }
161
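 /* the hardware and the doorbell hold the pointer as a byte offset, while the
  * driver keeps wptr in dwords, hence the shifts by 2
  */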
162 return wptr >> 2;
163}
164
165/**
166 * sdma_v5_2_ring_set_wptr - commit the write pointer
167 *
168 * @ring: amdgpu ring pointer
169 *
170 * Write the wptr back to the hardware (NAVI10+).
171 */
172static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
173{
174 struct amdgpu_device *adev = ring->adev;
175
176 DRM_DEBUG("Setting write pointer\n");
177 if (ring->use_doorbell) {
178 DRM_DEBUG("Using doorbell -- "
179 "wptr_offs == 0x%08x "
180 "lower_32_bits(ring->wptr << 2) == 0x%08x "
181 "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
182 ring->wptr_offs,
183 lower_32_bits(ring->wptr << 2),
184 upper_32_bits(ring->wptr << 2));
185 /* XXX check if swapping is necessary on BE */
186 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
187 ring->wptr << 2);
188 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
189 ring->doorbell_index, ring->wptr << 2);
190 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
191 } else {
192 DRM_DEBUG("Not using doorbell -- "
193 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
194 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
195 ring->me,
196 lower_32_bits(ring->wptr << 2),
197 ring->me,
198 upper_32_bits(ring->wptr << 2));
199 WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
200 lower_32_bits(ring->wptr << 2));
201 WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
202 upper_32_bits(ring->wptr << 2));
203 }
204}
205
206static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
207{
208 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
209 int i;
210
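 /* with burst_nop, the first NOP header carries a COUNT of the (count - 1)
  * padding dwords that follow, letting the engine consume them in one packet
  */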
211 for (i = 0; i < count; i++)
212 if (sdma && sdma->burst_nop && (i == 0))
213 amdgpu_ring_write(ring, ring->funcs->nop |
214 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
215 else
216 amdgpu_ring_write(ring, ring->funcs->nop);
217}
218
219/**
220 * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine
221 *
222 * @ring: amdgpu ring pointer
223 * @job: job to retrieve vmid from
224 * @ib: IB object to schedule
225 * @flags: unused
226 *
227 * Schedule an IB in the DMA ring.
228 */
229static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
230 struct amdgpu_job *job,
231 struct amdgpu_ib *ib,
232 uint32_t flags)
233{
234 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
235 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
236
 237 /* An IB packet must end on an 8-DW boundary--the next dword
 238 * must be on an 8-dword boundary. Our IB packet below is 6
 239 * dwords long, so add x NOPs such that, in modular
 240 * arithmetic,
 241 * wptr + 6 + x = 8k, k >= 0, which in C is
 242 * (wptr + 6 + x) % 8 == 0.
 243 * The expression below is a solution for x.
 244 */
245 sdma_v5_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
246
247 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
248 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
249 /* base must be 32 byte aligned */
250 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
251 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
252 amdgpu_ring_write(ring, ib->length_dw);
253 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
254 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
255}
256
257/**
 258 * sdma_v5_2_ring_emit_mem_sync - flush the caches via a graphics cache rinse
 259 *
 260 * @ring: amdgpu ring pointer
 261 *
 262 * Emit a GCR (graphics cache rinse) request to flush and invalidate the caches.
263 */
264static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
265{
266 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
267 SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
268 SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
269 SDMA_GCR_GLI_INV(1);
270
 271 /* flush the entire L0/L1/L2 caches; this can be tuned later based on performance requirements */
272 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
273 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
274 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
275 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
276 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
277 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
278 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
279 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
280}
281
282/**
283 * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
284 *
285 * @ring: amdgpu ring pointer
286 *
287 * Emit an hdp flush packet on the requested DMA ring.
288 */
289static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
290{
291 struct amdgpu_device *adev = ring->adev;
292 u32 ref_and_mask = 0;
293 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
294
295 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
296
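 /* the HDP-flush form of poll_regmem writes ref_and_mask to the nbio HDP flush
  * request register and polls the done register until this instance's bit reads back
  */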
297 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
298 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
299 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
300 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
301 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
302 amdgpu_ring_write(ring, ref_and_mask); /* reference */
303 amdgpu_ring_write(ring, ref_and_mask); /* mask */
304 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
305 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
306}
307
308/**
309 * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring
310 *
311 * @ring: amdgpu ring pointer
312 * @addr: address
313 * @seq: sequence number
314 * @flags: fence related flags
315 *
 316 * Add a DMA fence packet to the ring to write
 317 * the fence seq number, and a DMA trap packet to generate
 318 * an interrupt if needed.
319 */
320static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
321 unsigned flags)
322{
323 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
324 /* write the fence */
325 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
326 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
327 /* zero in first two bits */
328 BUG_ON(addr & 0x3);
329 amdgpu_ring_write(ring, lower_32_bits(addr));
330 amdgpu_ring_write(ring, upper_32_bits(addr));
331 amdgpu_ring_write(ring, lower_32_bits(seq));
332
333 /* optionally write high bits as well */
334 if (write64bit) {
335 addr += 4;
336 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
337 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
338 /* zero in first two bits */
339 BUG_ON(addr & 0x3);
340 amdgpu_ring_write(ring, lower_32_bits(addr));
341 amdgpu_ring_write(ring, upper_32_bits(addr));
342 amdgpu_ring_write(ring, upper_32_bits(seq));
343 }
344
345 if ((flags & AMDGPU_FENCE_FLAG_INT)) {
346 uint32_t ctx = ring->is_mes_queue ?
347 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
348 /* generate an interrupt */
349 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
350 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
351 }
352}
353
354
355/**
356 * sdma_v5_2_gfx_stop - stop the gfx async dma engines
357 *
358 * @adev: amdgpu_device pointer
359 *
360 * Stop the gfx async dma ring buffers.
361 */
362static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
363{
364 u32 rb_cntl, ib_cntl;
365 int i;
366
367 for (i = 0; i < adev->sdma.num_instances; i++) {
368 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
369 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
370 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
371 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
372 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
373 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
374 }
375}
376
377/**
378 * sdma_v5_2_rlc_stop - stop the compute async dma engines
379 *
380 * @adev: amdgpu_device pointer
381 *
382 * Stop the compute async dma queues.
383 */
384static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
385{
386 /* XXX todo */
387}
388
389/**
 390 * sdma_v5_2_ctx_switch_enable - enable/disable the async dma engines context switch
391 *
392 * @adev: amdgpu_device pointer
393 * @enable: enable/disable the DMA MEs context switch.
394 *
395 * Halt or unhalt the async dma engines context switch.
396 */
397static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
398{
399 u32 f32_cntl, phase_quantum = 0;
400 int i;
401
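 /* encode the requested quantum as a (VALUE, UNIT) mantissa/exponent pair,
  * clamped to the widths of the PHASEx_QUANTUM register fields
  */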
402 if (amdgpu_sdma_phase_quantum) {
403 unsigned value = amdgpu_sdma_phase_quantum;
404 unsigned unit = 0;
405
406 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
407 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
408 value = (value + 1) >> 1;
409 unit++;
410 }
411 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
412 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
413 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
414 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
415 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
416 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
417 WARN_ONCE(1,
418 "clamping sdma_phase_quantum to %uK clock cycles\n",
419 value << unit);
420 }
421 phase_quantum =
422 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
423 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
424 }
425
426 for (i = 0; i < adev->sdma.num_instances; i++) {
427 if (enable && amdgpu_sdma_phase_quantum) {
428 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
429 phase_quantum);
430 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
431 phase_quantum);
432 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
433 phase_quantum);
434 }
435
436 if (!amdgpu_sriov_vf(adev)) {
437 f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
438 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
439 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
440 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
441 }
442 }
443
444}
445
446/**
 447 * sdma_v5_2_enable - enable/disable the async dma engines
448 *
449 * @adev: amdgpu_device pointer
450 * @enable: enable/disable the DMA MEs.
451 *
452 * Halt or unhalt the async dma engines.
453 */
454static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
455{
456 u32 f32_cntl;
457 int i;
458
459 if (!enable) {
460 sdma_v5_2_gfx_stop(adev);
461 sdma_v5_2_rlc_stop(adev);
462 }
463
464 if (!amdgpu_sriov_vf(adev)) {
465 for (i = 0; i < adev->sdma.num_instances; i++) {
466 f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
467 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
468 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
469 }
470 }
471}
472
473/**
474 * sdma_v5_2_gfx_resume - setup and start the async dma engines
475 *
476 * @adev: amdgpu_device pointer
477 *
478 * Set up the gfx DMA ring buffers and enable them.
479 * Returns 0 for success, error for failure.
480 */
481static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
482{
483 struct amdgpu_ring *ring;
484 u32 rb_cntl, ib_cntl;
485 u32 rb_bufsz;
486 u32 doorbell;
487 u32 doorbell_offset;
488 u32 temp;
489 u32 wptr_poll_cntl;
490 u64 wptr_gpu_addr;
491 int i, r;
492
493 for (i = 0; i < adev->sdma.num_instances; i++) {
494 ring = &adev->sdma.instance[i].ring;
495
496 if (!amdgpu_sriov_vf(adev))
497 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
498
499 /* Set ring buffer size in dwords */
500 rb_bufsz = order_base_2(ring->ring_size / 4);
501 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
502 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
503#ifdef __BIG_ENDIAN
504 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
505 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
506 RPTR_WRITEBACK_SWAP_ENABLE, 1);
507#endif
508 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
509
510 /* Initialize the ring buffer's read and write pointers */
511 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
512 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
513 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
514 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
515
516 /* setup the wptr shadow polling */
517 wptr_gpu_addr = ring->wptr_gpu_addr;
518 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
519 lower_32_bits(wptr_gpu_addr));
520 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
521 upper_32_bits(wptr_gpu_addr));
522 wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
523 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
524 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
525 SDMA0_GFX_RB_WPTR_POLL_CNTL,
526 F32_POLL_ENABLE, 1);
527 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
528 wptr_poll_cntl);
529
530 /* set the wb address whether it's enabled or not */
531 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
532 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
533 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
534 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
535
536 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
537
538 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
539 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
540
541 ring->wptr = 0;
542
 543 /* before programming wptr to a smaller value, minor_ptr_update must be set first */
544 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
545
546 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
547 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
548 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
549 }
550
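 /* route the ring's doorbell: enable it and program its offset, or leave the
  * doorbell disabled when the wptr is written via registers
  */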
551 doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
552 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
553
554 if (ring->use_doorbell) {
555 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
556 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
557 OFFSET, ring->doorbell_index);
558 } else {
559 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
560 }
561 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
562 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
563
564 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
565 ring->doorbell_index,
566 adev->doorbell_index.sdma_doorbell_range);
567
568 if (amdgpu_sriov_vf(adev))
569 sdma_v5_2_ring_set_wptr(ring);
570
 571 /* set minor_ptr_update to 0 after wptr is programmed */
572
573 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
574
575 /* SRIOV VF has no control of any of registers below */
576 if (!amdgpu_sriov_vf(adev)) {
577 /* set utc l1 enable flag always to 1 */
578 temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
579 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
580
581 /* enable MCBP */
582 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
583 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
584
585 /* Set up RESP_MODE to non-copy addresses */
586 temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
587 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
588 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
589 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
590
591 /* program default cache read and write policy */
592 temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
593 /* clean read policy and write policy bits */
594 temp &= 0xFF0FFF;
595 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
596 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
597 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
598 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
599
600 /* unhalt engine */
601 temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
602 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
603 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
604 }
605
606 /* enable DMA RB */
607 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
608 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
609
610 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
611 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
612#ifdef __BIG_ENDIAN
613 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
614#endif
615 /* enable DMA IBs */
616 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
617
 618 if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
619 sdma_v5_2_ctx_switch_enable(adev, true);
620 sdma_v5_2_enable(adev, true);
621 }
622
623 r = amdgpu_ring_test_helper(ring);
624 if (r)
625 return r;
626 }
627
628 return 0;
629}
630
631/**
632 * sdma_v5_2_rlc_resume - setup and start the async dma engines
633 *
634 * @adev: amdgpu_device pointer
635 *
636 * Set up the compute DMA queues and enable them.
637 * Returns 0 for success, error for failure.
638 */
639static int sdma_v5_2_rlc_resume(struct amdgpu_device *adev)
640{
641 return 0;
642}
643
644/**
645 * sdma_v5_2_load_microcode - load the sDMA ME ucode
646 *
647 * @adev: amdgpu_device pointer
648 *
649 * Loads the sDMA0/1/2/3 ucode.
650 * Returns 0 for success, -EINVAL if the ucode is not available.
651 */
652static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
653{
654 const struct sdma_firmware_header_v1_0 *hdr;
655 const __le32 *fw_data;
656 u32 fw_size;
657 int i, j;
658
659 /* halt the MEs */
660 sdma_v5_2_enable(adev, false);
661
662 for (i = 0; i < adev->sdma.num_instances; i++) {
663 if (!adev->sdma.instance[i].fw)
664 return -EINVAL;
665
666 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
667 amdgpu_ucode_print_sdma_hdr(&hdr->header);
668 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
669
670 fw_data = (const __le32 *)
671 (adev->sdma.instance[i].fw->data +
672 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
673
674 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
675
676 for (j = 0; j < fw_size; j++) {
677 if (amdgpu_emu_mode == 1 && j % 500 == 0)
678 msleep(1);
679 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
680 }
681
682 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
683 }
684
685 return 0;
686}
687
688static int sdma_v5_2_soft_reset(void *handle)
689{
690 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
691 u32 grbm_soft_reset;
692 u32 tmp;
693 int i;
694
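 /* the per-instance SOFT_RESET_SDMAn bits are contiguous, so shift the SDMA0
  * bit by the instance index
  */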
695 for (i = 0; i < adev->sdma.num_instances; i++) {
696 grbm_soft_reset = REG_SET_FIELD(0,
697 GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
698 1);
699 grbm_soft_reset <<= i;
700
701 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
702 tmp |= grbm_soft_reset;
703 DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
704 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
705 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
706
707 udelay(50);
708
709 tmp &= ~grbm_soft_reset;
710 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
711 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
712
713 udelay(50);
714 }
715
716 return 0;
717}
718
719/**
720 * sdma_v5_2_start - setup and start the async dma engines
721 *
722 * @adev: amdgpu_device pointer
723 *
724 * Set up the DMA engines and enable them.
725 * Returns 0 for success, error for failure.
726 */
727static int sdma_v5_2_start(struct amdgpu_device *adev)
728{
729 int r = 0;
730
731 if (amdgpu_sriov_vf(adev)) {
732 sdma_v5_2_ctx_switch_enable(adev, false);
733 sdma_v5_2_enable(adev, false);
734
735 /* set RB registers */
736 r = sdma_v5_2_gfx_resume(adev);
737 return r;
738 }
739
740 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
741 r = sdma_v5_2_load_microcode(adev);
742 if (r)
743 return r;
744
 745 /* the value of mmSDMA0_F32_CNTL is invalid immediately after loading fw */
746 if (amdgpu_emu_mode == 1)
747 msleep(1000);
748 }
749
750 sdma_v5_2_soft_reset(adev);
751 /* unhalt the MEs */
752 sdma_v5_2_enable(adev, true);
753 /* enable sdma ring preemption */
754 sdma_v5_2_ctx_switch_enable(adev, true);
755
756 /* start the gfx rings and rlc compute queues */
757 r = sdma_v5_2_gfx_resume(adev);
758 if (r)
759 return r;
760 r = sdma_v5_2_rlc_resume(adev);
761
762 return r;
763}
764
765static int sdma_v5_2_mqd_init(struct amdgpu_device *adev, void *mqd,
766 struct amdgpu_mqd_prop *prop)
767{
768 struct v10_sdma_mqd *m = mqd;
769 uint64_t wb_gpu_addr;
770
771 m->sdmax_rlcx_rb_cntl =
772 order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
773 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
774 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
775 1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
776
777 m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
778 m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
779
780 m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
781 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
782
783 wb_gpu_addr = prop->wptr_gpu_addr;
784 m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
785 m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
786
787 wb_gpu_addr = prop->rptr_gpu_addr;
788 m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
789 m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
790
791 m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
792 mmSDMA0_GFX_IB_CNTL));
793
794 m->sdmax_rlcx_doorbell_offset =
795 prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
796
797 m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
798
799 return 0;
800}
801
802static void sdma_v5_2_set_mqd_funcs(struct amdgpu_device *adev)
803{
804 adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
805 adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_2_mqd_init;
806}
807
808/**
809 * sdma_v5_2_ring_test_ring - simple async dma engine test
810 *
811 * @ring: amdgpu_ring structure holding ring information
812 *
 813 * Test the DMA engine by using it to write a
 814 * value to memory.
815 * Returns 0 for success, error for failure.
816 */
817static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
818{
819 struct amdgpu_device *adev = ring->adev;
820 unsigned i;
821 unsigned index;
822 int r;
823 u32 tmp;
824 u64 gpu_addr;
825 volatile uint32_t *cpu_ptr = NULL;
826
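 /* seed a writeback slot with 0xCAFEDEAD, have SDMA overwrite it with
  * 0xDEADBEEF, then poll until the new value appears or the timeout expires
  */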
827 tmp = 0xCAFEDEAD;
828
829 if (ring->is_mes_queue) {
830 uint32_t offset = 0;
831 offset = amdgpu_mes_ctx_get_offs(ring,
832 AMDGPU_MES_CTX_PADDING_OFFS);
833 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
834 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
835 *cpu_ptr = tmp;
836 } else {
837 r = amdgpu_device_wb_get(adev, &index);
838 if (r) {
839 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
840 return r;
841 }
842
843 gpu_addr = adev->wb.gpu_addr + (index * 4);
844 adev->wb.wb[index] = cpu_to_le32(tmp);
845 }
846
847 r = amdgpu_ring_alloc(ring, 20);
848 if (r) {
849 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 850 if (!ring->is_mes_queue) /* the wb slot is only allocated for non-MES queues */
 amdgpu_device_wb_free(adev, index);
851 return r;
852 }
853
854 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
855 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
856 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
857 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
858 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
859 amdgpu_ring_write(ring, 0xDEADBEEF);
860 amdgpu_ring_commit(ring);
861
862 for (i = 0; i < adev->usec_timeout; i++) {
863 if (ring->is_mes_queue)
864 tmp = le32_to_cpu(*cpu_ptr);
865 else
866 tmp = le32_to_cpu(adev->wb.wb[index]);
867 if (tmp == 0xDEADBEEF)
868 break;
869 if (amdgpu_emu_mode == 1)
870 msleep(1);
871 else
872 udelay(1);
873 }
874
875 if (i >= adev->usec_timeout)
876 r = -ETIMEDOUT;
877
878 if (!ring->is_mes_queue)
879 amdgpu_device_wb_free(adev, index);
880
881 return r;
882}
883
884/**
885 * sdma_v5_2_ring_test_ib - test an IB on the DMA engine
886 *
887 * @ring: amdgpu_ring structure holding ring information
888 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
889 *
890 * Test a simple IB in the DMA ring.
891 * Returns 0 on success, error on failure.
892 */
893static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
894{
895 struct amdgpu_device *adev = ring->adev;
896 struct amdgpu_ib ib;
897 struct dma_fence *f = NULL;
898 unsigned index;
899 long r;
900 u32 tmp = 0;
901 u64 gpu_addr;
902 volatile uint32_t *cpu_ptr = NULL;
903
904 tmp = 0xCAFEDEAD;
905 memset(&ib, 0, sizeof(ib));
906
907 if (ring->is_mes_queue) {
908 uint32_t offset = 0;
909 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
910 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
911 ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
912
913 offset = amdgpu_mes_ctx_get_offs(ring,
914 AMDGPU_MES_CTX_PADDING_OFFS);
915 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
916 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
917 *cpu_ptr = tmp;
918 } else {
919 r = amdgpu_device_wb_get(adev, &index);
920 if (r) {
921 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
922 return r;
923 }
924
925 gpu_addr = adev->wb.gpu_addr + (index * 4);
926 adev->wb.wb[index] = cpu_to_le32(tmp);
927
928 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
929 if (r) {
930 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
931 goto err0;
932 }
933 }
934
935 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
936 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
937 ib.ptr[1] = lower_32_bits(gpu_addr);
938 ib.ptr[2] = upper_32_bits(gpu_addr);
939 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
940 ib.ptr[4] = 0xDEADBEEF;
941 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
942 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
943 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
944 ib.length_dw = 8;
945
946 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
947 if (r)
948 goto err1;
949
950 r = dma_fence_wait_timeout(f, false, timeout);
951 if (r == 0) {
952 DRM_ERROR("amdgpu: IB test timed out\n");
953 r = -ETIMEDOUT;
954 goto err1;
955 } else if (r < 0) {
956 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
957 goto err1;
958 }
959
960 if (ring->is_mes_queue)
961 tmp = le32_to_cpu(*cpu_ptr);
962 else
963 tmp = le32_to_cpu(adev->wb.wb[index]);
964
965 if (tmp == 0xDEADBEEF)
966 r = 0;
967 else
968 r = -EINVAL;
969
970err1:
971 amdgpu_ib_free(adev, &ib, NULL);
972 dma_fence_put(f);
973err0:
974 if (!ring->is_mes_queue)
975 amdgpu_device_wb_free(adev, index);
976 return r;
977}
978
979
980/**
981 * sdma_v5_2_vm_copy_pte - update PTEs by copying them from the GART
982 *
983 * @ib: indirect buffer to fill with commands
984 * @pe: addr of the page entry
985 * @src: src addr to copy from
986 * @count: number of page entries to update
987 *
988 * Update PTEs by copying them from the GART using sDMA.
989 */
990static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
991 uint64_t pe, uint64_t src,
992 unsigned count)
993{
994 unsigned bytes = count * 8;
995
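 /* each PTE is 8 bytes, so a single COPY_LINEAR packet moves count * 8 bytes
  * of entries from the source into the page table at @pe
  */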
996 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
997 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
998 ib->ptr[ib->length_dw++] = bytes - 1;
999 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1000 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1001 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1002 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1003 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1004
1005}
1006
1007/**
1008 * sdma_v5_2_vm_write_pte - update PTEs by writing them manually
1009 *
1010 * @ib: indirect buffer to fill with commands
1011 * @pe: addr of the page entry
1012 * @value: dst addr to write into pe
1013 * @count: number of page entries to update
1014 * @incr: increase next addr by incr bytes
1015 *
1016 * Update PTEs by writing them manually using sDMA.
1017 */
1018static void sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1019 uint64_t value, unsigned count,
1020 uint32_t incr)
1021{
1022 unsigned ndw = count * 2;
1023
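 /* each PTE is 64 bits, i.e. two dwords per entry; the packet's count field below is ndw - 1 */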
1024 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1025 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1026 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1027 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1028 ib->ptr[ib->length_dw++] = ndw - 1;
1029 for (; ndw > 0; ndw -= 2) {
1030 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1031 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1032 value += incr;
1033 }
1034}
1035
1036/**
1037 * sdma_v5_2_vm_set_pte_pde - update the page tables using sDMA
1038 *
1039 * @ib: indirect buffer to fill with commands
1040 * @pe: addr of the page entry
1041 * @addr: dst addr to write into pe
1042 * @count: number of page entries to update
1043 * @incr: increase next addr by incr bytes
1044 * @flags: access flags
1045 *
1046 * Update the page tables using sDMA.
1047 */
1048static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
1049 uint64_t pe,
1050 uint64_t addr, unsigned count,
1051 uint32_t incr, uint64_t flags)
1052{
1053 /* for physically contiguous pages (vram) */
1054 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1055 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1056 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1057 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1058 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1059 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1060 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1061 ib->ptr[ib->length_dw++] = incr; /* increment size */
1062 ib->ptr[ib->length_dw++] = 0;
1063 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1064}
1065
1066/**
1067 * sdma_v5_2_ring_pad_ib - pad the IB
1068 *
1069 * @ib: indirect buffer to fill with padding
1070 * @ring: amdgpu_ring structure holding ring information
1071 *
1072 * Pad the IB with NOPs to a boundary multiple of 8.
1073 */
1074static void sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1075{
1076 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1077 u32 pad_count;
1078 int i;
1079
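 /* number of NOP dwords needed to round length_dw up to the next multiple of 8 (0 if already aligned) */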
1080 pad_count = (-ib->length_dw) & 0x7;
1081 for (i = 0; i < pad_count; i++)
1082 if (sdma && sdma->burst_nop && (i == 0))
1083 ib->ptr[ib->length_dw++] =
1084 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1085 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1086 else
1087 ib->ptr[ib->length_dw++] =
1088 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1089}
1090
1091
1092/**
1093 * sdma_v5_2_ring_emit_pipeline_sync - sync the pipeline
1094 *
1095 * @ring: amdgpu_ring pointer
1096 *
 1097 * Make sure all previous operations are completed.
1098 */
1099static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1100{
1101 uint32_t seq = ring->fence_drv.sync_seq;
1102 uint64_t addr = ring->fence_drv.gpu_addr;
1103
1104 /* wait for idle */
1105 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1106 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1107 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1108 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1109 amdgpu_ring_write(ring, addr & 0xfffffffc);
1110 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1111 amdgpu_ring_write(ring, seq); /* reference */
1112 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1113 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1114 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1115}
1116
1117
1118/**
1119 * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA
1120 *
1121 * @ring: amdgpu_ring pointer
1122 * @vmid: vmid number to use
1123 * @pd_addr: address
1124 *
1125 * Update the page table base and flush the VM TLB
1126 * using sDMA.
1127 */
1128static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
1129 unsigned vmid, uint64_t pd_addr)
1130{
1131 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1132}
1133
1134static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
1135 uint32_t reg, uint32_t val)
1136{
1137 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1138 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1139 amdgpu_ring_write(ring, reg);
1140 amdgpu_ring_write(ring, val);
1141}
1142
1143static void sdma_v5_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1144 uint32_t val, uint32_t mask)
1145{
1146 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1147 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1148 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1149 amdgpu_ring_write(ring, reg << 2);
1150 amdgpu_ring_write(ring, 0);
1151 amdgpu_ring_write(ring, val); /* reference */
1152 amdgpu_ring_write(ring, mask); /* mask */
1153 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1154 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1155}
1156
1157static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1158 uint32_t reg0, uint32_t reg1,
1159 uint32_t ref, uint32_t mask)
1160{
1161 amdgpu_ring_emit_wreg(ring, reg0, ref);
1162 /* wait for a cycle to reset vm_inv_eng*_ack */
1163 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1164 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1165}
1166
1167static int sdma_v5_2_early_init(void *handle)
1168{
1169 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1170 int r;
1171
1172 r = amdgpu_sdma_init_microcode(adev, 0, true);
1173 if (r)
1174 return r;
1175
1176 sdma_v5_2_set_ring_funcs(adev);
1177 sdma_v5_2_set_buffer_funcs(adev);
1178 sdma_v5_2_set_vm_pte_funcs(adev);
1179 sdma_v5_2_set_irq_funcs(adev);
1180 sdma_v5_2_set_mqd_funcs(adev);
1181
1182 return 0;
1183}
1184
1185static unsigned sdma_v5_2_seq_to_irq_id(int seq_num)
1186{
1187 switch (seq_num) {
1188 case 0:
1189 return SOC15_IH_CLIENTID_SDMA0;
1190 case 1:
1191 return SOC15_IH_CLIENTID_SDMA1;
1192 case 2:
1193 return SOC15_IH_CLIENTID_SDMA2;
1194 case 3:
1195 return SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid;
1196 default:
1197 break;
1198 }
1199 return -EINVAL;
1200}
1201
1202static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
1203{
1204 switch (seq_num) {
1205 case 0:
1206 return SDMA0_5_0__SRCID__SDMA_TRAP;
1207 case 1:
1208 return SDMA1_5_0__SRCID__SDMA_TRAP;
1209 case 2:
1210 return SDMA2_5_0__SRCID__SDMA_TRAP;
1211 case 3:
1212 return SDMA3_5_0__SRCID__SDMA_TRAP;
1213 default:
1214 break;
1215 }
1216 return -EINVAL;
1217}
1218
1219static int sdma_v5_2_sw_init(void *handle)
1220{
1221 struct amdgpu_ring *ring;
1222 int r, i;
1223 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1224
1225 /* SDMA trap event */
1226 for (i = 0; i < adev->sdma.num_instances; i++) {
1227 r = amdgpu_irq_add_id(adev, sdma_v5_2_seq_to_irq_id(i),
1228 sdma_v5_2_seq_to_trap_id(i),
1229 &adev->sdma.trap_irq);
1230 if (r)
1231 return r;
1232 }
1233
1234 for (i = 0; i < adev->sdma.num_instances; i++) {
1235 ring = &adev->sdma.instance[i].ring;
1236 ring->ring_obj = NULL;
1237 ring->use_doorbell = true;
1238 ring->me = i;
1239
1240 DRM_INFO("use_doorbell being set to: [%s]\n",
1241 ring->use_doorbell?"true":"false");
1242
1243 ring->doorbell_index =
1244 (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
1245
1246 ring->vm_hub = AMDGPU_GFXHUB(0);
1247 sprintf(ring->name, "sdma%d", i);
1248 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1249 AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1250 AMDGPU_RING_PRIO_DEFAULT, NULL);
1251 if (r)
1252 return r;
1253 }
1254
1255 return r;
1256}
1257
1258static int sdma_v5_2_sw_fini(void *handle)
1259{
1260 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1261 int i;
1262
1263 for (i = 0; i < adev->sdma.num_instances; i++)
1264 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1265
1266 amdgpu_sdma_destroy_inst_ctx(adev, true);
1267
1268 return 0;
1269}
1270
1271static int sdma_v5_2_hw_init(void *handle)
1272{
1273 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1274
1275 return sdma_v5_2_start(adev);
1276}
1277
1278static int sdma_v5_2_hw_fini(void *handle)
1279{
1280 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1281
1282 if (amdgpu_sriov_vf(adev))
1283 return 0;
1284
1285 sdma_v5_2_ctx_switch_enable(adev, false);
1286 sdma_v5_2_enable(adev, false);
1287
1288 return 0;
1289}
1290
1291static int sdma_v5_2_suspend(void *handle)
1292{
1293 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1294
1295 return sdma_v5_2_hw_fini(adev);
1296}
1297
1298static int sdma_v5_2_resume(void *handle)
1299{
1300 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1301
1302 return sdma_v5_2_hw_init(adev);
1303}
1304
1305static bool sdma_v5_2_is_idle(void *handle)
1306{
1307 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1308 u32 i;
1309
1310 for (i = 0; i < adev->sdma.num_instances; i++) {
1311 u32 tmp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1312
1313 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1314 return false;
1315 }
1316
1317 return true;
1318}
1319
1320static int sdma_v5_2_wait_for_idle(void *handle)
1321{
1322 unsigned i;
1323 u32 sdma0, sdma1, sdma2, sdma3;
1324 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1325
1326 for (i = 0; i < adev->usec_timeout; i++) {
1327 sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1328 sdma1 = RREG32(sdma_v5_2_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1329 sdma2 = RREG32(sdma_v5_2_get_reg_offset(adev, 2, mmSDMA0_STATUS_REG));
1330 sdma3 = RREG32(sdma_v5_2_get_reg_offset(adev, 3, mmSDMA0_STATUS_REG));
1331
1332 if (sdma0 & sdma1 & sdma2 & sdma3 & SDMA0_STATUS_REG__IDLE_MASK)
1333 return 0;
1334 udelay(1);
1335 }
1336 return -ETIMEDOUT;
1337}
1338
1339static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
1340{
1341 int i, r = 0;
1342 struct amdgpu_device *adev = ring->adev;
1343 u32 index = 0;
1344 u64 sdma_gfx_preempt;
1345
1346 amdgpu_sdma_get_index_from_ring(ring, &index);
1347 sdma_gfx_preempt =
1348 sdma_v5_2_get_reg_offset(adev, index, mmSDMA0_GFX_PREEMPT);
1349
1350 /* assert preemption condition */
1351 amdgpu_ring_set_preempt_cond_exec(ring, false);
1352
1353 /* emit the trailing fence */
1354 ring->trail_seq += 1;
1355 amdgpu_ring_alloc(ring, 10);
1356 sdma_v5_2_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1357 ring->trail_seq, 0);
1358 amdgpu_ring_commit(ring);
1359
1360 /* assert IB preemption */
1361 WREG32(sdma_gfx_preempt, 1);
1362
1363 /* poll the trailing fence */
1364 for (i = 0; i < adev->usec_timeout; i++) {
1365 if (ring->trail_seq ==
1366 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1367 break;
1368 udelay(1);
1369 }
1370
1371 if (i >= adev->usec_timeout) {
1372 r = -EINVAL;
1373 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1374 }
1375
1376 /* deassert IB preemption */
1377 WREG32(sdma_gfx_preempt, 0);
1378
1379 /* deassert the preemption condition */
1380 amdgpu_ring_set_preempt_cond_exec(ring, true);
1381 return r;
1382}
1383
1384static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
1385 struct amdgpu_irq_src *source,
1386 unsigned type,
1387 enum amdgpu_interrupt_state state)
1388{
1389 u32 sdma_cntl;
1390 u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
1391
1392 if (!amdgpu_sriov_vf(adev)) {
1393 sdma_cntl = RREG32(reg_offset);
1394 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1395 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1396 WREG32(reg_offset, sdma_cntl);
1397 }
1398
1399 return 0;
1400}
1401
1402static int sdma_v5_2_process_trap_irq(struct amdgpu_device *adev,
1403 struct amdgpu_irq_src *source,
1404 struct amdgpu_iv_entry *entry)
1405{
1406 uint32_t mes_queue_id = entry->src_data[0];
1407
1408 DRM_DEBUG("IH: SDMA trap\n");
1409
1410 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
1411 struct amdgpu_mes_queue *queue;
1412
1413 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
1414
1415 spin_lock(&adev->mes.queue_id_lock);
1416 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
1417 if (queue) {
1418 DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
1419 amdgpu_fence_process(queue->ring);
1420 }
1421 spin_unlock(&adev->mes.queue_id_lock);
1422 return 0;
1423 }
1424
1425 switch (entry->client_id) {
1426 case SOC15_IH_CLIENTID_SDMA0:
1427 switch (entry->ring_id) {
1428 case 0:
1429 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1430 break;
1431 case 1:
1432 /* XXX compute */
1433 break;
1434 case 2:
1435 /* XXX compute */
1436 break;
1437 case 3:
1438 /* XXX page queue*/
1439 break;
1440 }
1441 break;
1442 case SOC15_IH_CLIENTID_SDMA1:
1443 switch (entry->ring_id) {
1444 case 0:
1445 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1446 break;
1447 case 1:
1448 /* XXX compute */
1449 break;
1450 case 2:
1451 /* XXX compute */
1452 break;
1453 case 3:
1454 /* XXX page queue*/
1455 break;
1456 }
1457 break;
1458 case SOC15_IH_CLIENTID_SDMA2:
1459 switch (entry->ring_id) {
1460 case 0:
1461 amdgpu_fence_process(&adev->sdma.instance[2].ring);
1462 break;
1463 case 1:
1464 /* XXX compute */
1465 break;
1466 case 2:
1467 /* XXX compute */
1468 break;
1469 case 3:
1470 /* XXX page queue*/
1471 break;
1472 }
1473 break;
1474 case SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid:
1475 switch (entry->ring_id) {
1476 case 0:
1477 amdgpu_fence_process(&adev->sdma.instance[3].ring);
1478 break;
1479 case 1:
1480 /* XXX compute */
1481 break;
1482 case 2:
1483 /* XXX compute */
1484 break;
1485 case 3:
1486 /* XXX page queue*/
1487 break;
1488 }
1489 break;
1490 }
1491 return 0;
1492}
1493
1494static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
1495 struct amdgpu_irq_src *source,
1496 struct amdgpu_iv_entry *entry)
1497{
1498 return 0;
1499}
1500
1501static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,
1502 int i)
1503{
1504 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1505 case IP_VERSION(5, 2, 1):
1506 if (adev->sdma.instance[i].fw_version < 70)
1507 return false;
1508 break;
1509 case IP_VERSION(5, 2, 3):
1510 if (adev->sdma.instance[i].fw_version < 47)
1511 return false;
1512 break;
1513 case IP_VERSION(5, 2, 7):
1514 if (adev->sdma.instance[i].fw_version < 9)
1515 return false;
1516 break;
1517 default:
1518 return true;
1519 }
1520
1521 return true;
1522
1523}
1524
1525static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1526 bool enable)
1527{
1528 uint32_t data, def;
1529 int i;
1530
1531 for (i = 0; i < adev->sdma.num_instances; i++) {
1532
1533 if (!sdma_v5_2_firmware_mgcg_support(adev, i))
1534 adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;
1535
1536 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1537 /* Enable sdma clock gating */
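 /* clearing the SOFT_OVERRIDE bits lets medium grain clock gating take effect;
  * setting them forces the clocks on
  */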
1538 def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1539 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1540 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1541 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1542 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1543 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
1544 SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
1545 if (def != data)
1546 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1547 } else {
1548 /* Disable sdma clock gating */
1549 def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1550 data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1551 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1552 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1553 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1554 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
1555 SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
1556 if (def != data)
1557 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1558 }
1559 }
1560}
1561
1562static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1563 bool enable)
1564{
1565 uint32_t data, def;
1566 int i;
1567
1568 for (i = 0; i < adev->sdma.num_instances; i++) {
1569 if (adev->sdma.instance[i].fw_version < 70 &&
1570 amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
1571 IP_VERSION(5, 2, 1))
1572 adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;
1573
1574 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1575 /* Enable sdma mem light sleep */
1576 def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1577 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1578 if (def != data)
1579 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1580
1581 } else {
1582 /* Disable sdma mem light sleep */
1583 def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1584 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1585 if (def != data)
1586 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1587
1588 }
1589 }
1590}
1591
1592static int sdma_v5_2_set_clockgating_state(void *handle,
1593 enum amd_clockgating_state state)
1594{
1595 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1596
1597 if (amdgpu_sriov_vf(adev))
1598 return 0;
1599
1600 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1601 case IP_VERSION(5, 2, 0):
1602 case IP_VERSION(5, 2, 2):
1603 case IP_VERSION(5, 2, 1):
1604 case IP_VERSION(5, 2, 4):
1605 case IP_VERSION(5, 2, 5):
1606 case IP_VERSION(5, 2, 6):
1607 case IP_VERSION(5, 2, 3):
1608 case IP_VERSION(5, 2, 7):
1609 sdma_v5_2_update_medium_grain_clock_gating(adev,
1610 state == AMD_CG_STATE_GATE);
1611 sdma_v5_2_update_medium_grain_light_sleep(adev,
1612 state == AMD_CG_STATE_GATE);
1613 break;
1614 default:
1615 break;
1616 }
1617
1618 return 0;
1619}
1620
1621static int sdma_v5_2_set_powergating_state(void *handle,
1622 enum amd_powergating_state state)
1623{
1624 return 0;
1625}
1626
1627static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
1628{
1629 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1630 int data;
1631
1632 if (amdgpu_sriov_vf(adev))
1633 *flags = 0;
1634
1635 /* AMD_CG_SUPPORT_SDMA_MGCG */
1636 data = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1637 if (!(data & SDMA0_CLK_CTRL__CGCG_EN_OVERRIDE_MASK))
1638 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1639
1640 /* AMD_CG_SUPPORT_SDMA_LS */
1641 data = RREG32_KIQ(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1642 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1643 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1644}
1645
1646static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
1647{
1648 struct amdgpu_device *adev = ring->adev;
1649
1650 /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
1651 * disallow GFXOFF in some cases leading to
1652 * hangs in SDMA. Disallow GFXOFF while SDMA is active.
 1653 * We could probably limit this to 5.2.3,
 1654 * but it shouldn't hurt for other parts since
 1655 * GFXOFF will be disallowed anyway while SDMA is
 1656 * active; this just makes it explicit.
1657 */
1658 amdgpu_gfx_off_ctrl(adev, false);
1659}
1660
1661static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
1662{
1663 struct amdgpu_device *adev = ring->adev;
1664
1665 /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
1666 * disallow GFXOFF in some cases leading to
1667 * hangs in SDMA. Allow GFXOFF when SDMA is complete.
1668 */
1669 amdgpu_gfx_off_ctrl(adev, true);
1670}
1671
1672const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
1673 .name = "sdma_v5_2",
1674 .early_init = sdma_v5_2_early_init,
1675 .late_init = NULL,
1676 .sw_init = sdma_v5_2_sw_init,
1677 .sw_fini = sdma_v5_2_sw_fini,
1678 .hw_init = sdma_v5_2_hw_init,
1679 .hw_fini = sdma_v5_2_hw_fini,
1680 .suspend = sdma_v5_2_suspend,
1681 .resume = sdma_v5_2_resume,
1682 .is_idle = sdma_v5_2_is_idle,
1683 .wait_for_idle = sdma_v5_2_wait_for_idle,
1684 .soft_reset = sdma_v5_2_soft_reset,
1685 .set_clockgating_state = sdma_v5_2_set_clockgating_state,
1686 .set_powergating_state = sdma_v5_2_set_powergating_state,
1687 .get_clockgating_state = sdma_v5_2_get_clockgating_state,
1688};
1689
1690static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
1691 .type = AMDGPU_RING_TYPE_SDMA,
1692 .align_mask = 0xf,
1693 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1694 .support_64bit_ptrs = true,
1695 .secure_submission_supported = true,
1696 .get_rptr = sdma_v5_2_ring_get_rptr,
1697 .get_wptr = sdma_v5_2_ring_get_wptr,
1698 .set_wptr = sdma_v5_2_ring_set_wptr,
1699 .emit_frame_size =
1700 5 + /* sdma_v5_2_ring_init_cond_exec */
1701 6 + /* sdma_v5_2_ring_emit_hdp_flush */
1702 3 + /* hdp_invalidate */
1703 6 + /* sdma_v5_2_ring_emit_pipeline_sync */
1704 /* sdma_v5_2_ring_emit_vm_flush */
1705 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1706 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1707 10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
1708 .emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
1709 .emit_ib = sdma_v5_2_ring_emit_ib,
1710 .emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
1711 .emit_fence = sdma_v5_2_ring_emit_fence,
1712 .emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
1713 .emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
1714 .emit_hdp_flush = sdma_v5_2_ring_emit_hdp_flush,
1715 .test_ring = sdma_v5_2_ring_test_ring,
1716 .test_ib = sdma_v5_2_ring_test_ib,
1717 .insert_nop = sdma_v5_2_ring_insert_nop,
1718 .pad_ib = sdma_v5_2_ring_pad_ib,
1719 .begin_use = sdma_v5_2_ring_begin_use,
1720 .end_use = sdma_v5_2_ring_end_use,
1721 .emit_wreg = sdma_v5_2_ring_emit_wreg,
1722 .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
1723 .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
1724 .init_cond_exec = sdma_v5_2_ring_init_cond_exec,
1725 .patch_cond_exec = sdma_v5_2_ring_patch_cond_exec,
1726 .preempt_ib = sdma_v5_2_ring_preempt_ib,
1727};
1728
1729static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
1730{
1731 int i;
1732
1733 for (i = 0; i < adev->sdma.num_instances; i++) {
1734 adev->sdma.instance[i].ring.funcs = &sdma_v5_2_ring_funcs;
1735 adev->sdma.instance[i].ring.me = i;
1736 }
1737}
1738
1739static const struct amdgpu_irq_src_funcs sdma_v5_2_trap_irq_funcs = {
1740 .set = sdma_v5_2_set_trap_irq_state,
1741 .process = sdma_v5_2_process_trap_irq,
1742};
1743
1744static const struct amdgpu_irq_src_funcs sdma_v5_2_illegal_inst_irq_funcs = {
1745 .process = sdma_v5_2_process_illegal_inst_irq,
1746};
1747
1748static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
1749{
1750 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1751 adev->sdma.num_instances;
1752 adev->sdma.trap_irq.funcs = &sdma_v5_2_trap_irq_funcs;
1753 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_2_illegal_inst_irq_funcs;
1754}
1755
1756/**
1757 * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine
1758 *
1759 * @ib: indirect buffer to copy to
1760 * @src_offset: src GPU address
1761 * @dst_offset: dst GPU address
1762 * @byte_count: number of bytes to xfer
1763 * @tmz: whether a secure (TMZ) copy should be used
1764 *
1765 * Copy GPU buffers using the DMA engine.
1766 * Used by the amdgpu ttm implementation to move pages if
1767 * registered as the asic copy callback.
1768 */
1769static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
1770 uint64_t src_offset,
1771 uint64_t dst_offset,
1772 uint32_t byte_count,
1773 bool tmz)
1774{
1775 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1776 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
1777 SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
1778 ib->ptr[ib->length_dw++] = byte_count - 1;
1779 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1780 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1781 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1782 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1783 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1784}
1785
1786/**
1787 * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine
1788 *
1789 * @ib: indirect buffer to fill
1790 * @src_data: value to write to buffer
1791 * @dst_offset: dst GPU address
1792 * @byte_count: number of bytes to xfer
1793 *
1794 * Fill GPU buffers using the DMA engine.
1795 */
1796static void sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib,
1797 uint32_t src_data,
1798 uint64_t dst_offset,
1799 uint32_t byte_count)
1800{
1801 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1802 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1803 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1804 ib->ptr[ib->length_dw++] = src_data;
1805 ib->ptr[ib->length_dw++] = byte_count - 1;
1806}
1807
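/* copy_num_dw/fill_num_dw match the 7-dword COPY_LINEAR and 5-dword
 * CONST_FILL packets emitted above; 0x400000 (4 MiB) is the maximum
 * byte count a single packet is allowed to describe here.
 */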
1808static const struct amdgpu_buffer_funcs sdma_v5_2_buffer_funcs = {
1809 .copy_max_bytes = 0x400000,
1810 .copy_num_dw = 7,
1811 .emit_copy_buffer = sdma_v5_2_emit_copy_buffer,
1812
1813 .fill_max_bytes = 0x400000,
1814 .fill_num_dw = 5,
1815 .emit_fill_buffer = sdma_v5_2_emit_fill_buffer,
1816};
1817
1818static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
1819{
1820 if (adev->mman.buffer_funcs == NULL) {
1821 adev->mman.buffer_funcs = &sdma_v5_2_buffer_funcs;
1822 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1823 }
1824}
1825
1826static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
1827 .copy_pte_num_dw = 7,
1828 .copy_pte = sdma_v5_2_vm_copy_pte,
1829 .write_pte = sdma_v5_2_vm_write_pte,
1830 .set_pte_pde = sdma_v5_2_vm_set_pte_pde,
1831};
1832
1833static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
1834{
1835 unsigned i;
1836
1837 if (adev->vm_manager.vm_pte_funcs == NULL) {
1838 adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
1839 for (i = 0; i < adev->sdma.num_instances; i++) {
1840 adev->vm_manager.vm_pte_scheds[i] =
1841 &adev->sdma.instance[i].ring.sched;
1842 }
1843 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
1844 }
1845}
1846
1847const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
1848 .type = AMD_IP_BLOCK_TYPE_SDMA,
1849 .major = 5,
1850 .minor = 2,
1851 .rev = 0,
1852 .funcs = &sdma_v5_2_ip_funcs,
1853};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_3_0_offset.h"
34#include "gc/gc_10_3_0_sh_mask.h"
35#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37#include "ivsrcid/sdma2/irqsrcs_sdma2_5_0.h"
38#include "ivsrcid/sdma3/irqsrcs_sdma3_5_0.h"
39
40#include "soc15_common.h"
41#include "soc15.h"
42#include "navi10_sdma_pkt_open.h"
43#include "nbio_v2_3.h"
44#include "sdma_common.h"
45#include "sdma_v5_2.h"
46
47MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin");
48MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin");
49MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin");
50MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");
51
52MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
53MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin");
54MODULE_FIRMWARE("amdgpu/sdma_5_2_6.bin");
55MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
56
57#define SDMA1_REG_OFFSET 0x600
58#define SDMA3_REG_OFFSET 0x400
59#define SDMA0_HYP_DEC_REG_START 0x5880
60#define SDMA0_HYP_DEC_REG_END 0x5893
61#define SDMA1_HYP_DEC_REG_OFFSET 0x20
62
63static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = {
64 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
65 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
66 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
67 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
68 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
69 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
70 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
71 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
72 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
73 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
74 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
75 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
76 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
77 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
78 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
79 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
80 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
81 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
82 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
83 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
84 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
85 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
86 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
87 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
88 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
89 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
90 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
91 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
92 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
93 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
94 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
95 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
96 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
97 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
98 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
99 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
100 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
101 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
102 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
103 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
104 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
105 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
106 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
107 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
108 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
109 SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
110};
111
112static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
113static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
114static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
115static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
116
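/*
 * sdma_v5_2_get_reg_offset - map an SDMA register to its MMIO offset
 *
 * The HYP_DEC registers of all instances live in a dedicated aperture
 * (reg_offset[GC_HWIP][0][1]), spaced SDMA1_HYP_DEC_REG_OFFSET apart per
 * instance.  All other registers of instances 0/1 sit in the first GC
 * aperture (instance 1 shifted by SDMA1_REG_OFFSET), while instances 2/3
 * use the third aperture (instance 3 shifted by SDMA3_REG_OFFSET).
 */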
117static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
118{
119 u32 base;
120
121 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
122 internal_offset <= SDMA0_HYP_DEC_REG_END) {
123 base = adev->reg_offset[GC_HWIP][0][1];
124 if (instance != 0)
125 internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
126 } else {
127 if (instance < 2) {
128 base = adev->reg_offset[GC_HWIP][0][0];
129 if (instance == 1)
130 internal_offset += SDMA1_REG_OFFSET;
131 } else {
132 base = adev->reg_offset[GC_HWIP][0][2];
133 if (instance == 3)
134 internal_offset += SDMA3_REG_OFFSET;
135 }
136 }
137
138 return base + internal_offset;
139}
140
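/*
 * sdma_v5_2_ring_init_cond_exec - emit a COND_EXE packet
 *
 * Emits a conditional-execute packet referencing the dword at @addr with a
 * reference value of 1.  The execution count dword is written as a
 * placeholder (0) and its ring offset is returned so that patch_cond_exec
 * can fill in the real number of dwords to skip later.
 */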
141static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring,
142 uint64_t addr)
143{
144 unsigned ret;
145
146 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
147 amdgpu_ring_write(ring, lower_32_bits(addr));
148 amdgpu_ring_write(ring, upper_32_bits(addr));
149 amdgpu_ring_write(ring, 1);
150 /* this is the offset we need patch later */
151 ret = ring->wptr & ring->buf_mask;
152 /* insert dummy here and patch it later */
153 amdgpu_ring_write(ring, 0);
154
155 return ret;
156}
157
158/**
159 * sdma_v5_2_ring_get_rptr - get the current read pointer
160 *
161 * @ring: amdgpu ring pointer
162 *
163 * Get the current rptr from the hardware (NAVI10+).
164 */
165static uint64_t sdma_v5_2_ring_get_rptr(struct amdgpu_ring *ring)
166{
167 u64 *rptr;
168
169 /* XXX check if swapping is necessary on BE */
170 rptr = (u64 *)ring->rptr_cpu_addr;
171
172 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
173 return ((*rptr) >> 2);
174}
175
176/**
177 * sdma_v5_2_ring_get_wptr - get the current write pointer
178 *
179 * @ring: amdgpu ring pointer
180 *
181 * Get the current wptr from the hardware (NAVI10+).
182 */
183static uint64_t sdma_v5_2_ring_get_wptr(struct amdgpu_ring *ring)
184{
185 struct amdgpu_device *adev = ring->adev;
186 u64 wptr;
187
188 if (ring->use_doorbell) {
189 /* XXX check if swapping is necessary on BE */
190 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
191 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
192 } else {
193 wptr = RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
194 wptr = wptr << 32;
195 wptr |= RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
196 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
197 }
198
199 return wptr >> 2;
200}
201
202/**
203 * sdma_v5_2_ring_set_wptr - commit the write pointer
204 *
205 * @ring: amdgpu ring pointer
206 *
207 * Write the wptr back to the hardware (NAVI10+).
208 */
209static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
210{
211 struct amdgpu_device *adev = ring->adev;
212
213 DRM_DEBUG("Setting write pointer\n");
214 if (ring->use_doorbell) {
215 DRM_DEBUG("Using doorbell -- "
216 "wptr_offs == 0x%08x "
217 "lower_32_bits(ring->wptr << 2) == 0x%08x "
218 "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
219 ring->wptr_offs,
220 lower_32_bits(ring->wptr << 2),
221 upper_32_bits(ring->wptr << 2));
222 /* XXX check if swapping is necessary on BE */
223 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
224 ring->wptr << 2);
225 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
226 ring->doorbell_index, ring->wptr << 2);
227 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
228 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(5, 2, 1)) {
229 /* SDMA seems to miss doorbells sometimes when powergating kicks in.
230 * Updating the wptr directly will wake it. This is only safe because
231 * we disallow gfxoff in begin_use() and then allow it again in end_use().
232 */
233 WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
234 lower_32_bits(ring->wptr << 2));
235 WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
236 upper_32_bits(ring->wptr << 2));
237 }
238 } else {
239 DRM_DEBUG("Not using doorbell -- "
240 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
241 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
242 ring->me,
243 lower_32_bits(ring->wptr << 2),
244 ring->me,
245 upper_32_bits(ring->wptr << 2));
246 WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
247 lower_32_bits(ring->wptr << 2));
248 WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
249 upper_32_bits(ring->wptr << 2));
250 }
251}
252
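/*
 * sdma_v5_2_ring_insert_nop - insert @count NOP dwords into the ring
 *
 * When the firmware supports burst NOPs, the first NOP carries a COUNT
 * field telling the engine to skip the remaining count - 1 dwords instead
 * of fetching each NOP individually.
 */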
253static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
254{
255 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
256 int i;
257
258 for (i = 0; i < count; i++)
259 if (sdma && sdma->burst_nop && (i == 0))
260 amdgpu_ring_write(ring, ring->funcs->nop |
261 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
262 else
263 amdgpu_ring_write(ring, ring->funcs->nop);
264}
265
266/**
267 * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine
268 *
269 * @ring: amdgpu ring pointer
270 * @job: job to retrieve vmid from
271 * @ib: IB object to schedule
272 * @flags: unused
273 *
274 * Schedule an IB in the DMA ring.
275 */
276static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
277 struct amdgpu_job *job,
278 struct amdgpu_ib *ib,
279 uint32_t flags)
280{
281 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
282 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
283
284 /* An IB packet must end on an 8-dword boundary, i.e. the next
285 * packet must start on an 8-dword boundary. Our IB packet below is 6
286 * dwords long, so insert x NOPs such that, in
287 * modular arithmetic,
288 * wptr + 6 + x = 8k, k >= 0, which in C is
289 * (wptr + 6 + x) % 8 == 0.
290 * The expression below is a solution for x.
291 */
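/* Example: if wptr % 8 == 3, x = (2 - 3) & 7 = 7 NOPs are inserted
 * and 3 + 7 + 6 = 16 is again a multiple of 8.
 */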
292 sdma_v5_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
293
294 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
295 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
296 /* base must be 32 byte aligned */
297 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
298 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
299 amdgpu_ring_write(ring, ib->length_dw);
300 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
301 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
302}
303
304/**
305 * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
306 *
307 * @ring: amdgpu ring pointer
308 *
309 * flush the IB by graphics cache rinse.
310 */
311static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
312{
313 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
314 SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
315 SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
316 SDMA_GCR_GLI_INV(1);
317
318 /* flush the entire L0/L1/L2 cache; this can be narrowed based on performance requirements */
319 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
320 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
321 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
322 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
323 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
324 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
325 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
326 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
327}
328
329/**
330 * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
331 *
332 * @ring: amdgpu ring pointer
333 *
334 * Emit an hdp flush packet on the requested DMA ring.
335 */
336static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
337{
338 struct amdgpu_device *adev = ring->adev;
339 u32 ref_and_mask = 0;
340 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
341
342 if (ring->me > 1) {
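/* Only SDMA0/1 have ref_and_mask bits in the NBIO HDP flush
 * registers, so the remaining instances fall back to the generic
 * ASIC HDP flush instead of emitting a POLL_REGMEM packet.
 */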
343 amdgpu_asic_flush_hdp(adev, ring);
344 } else {
345 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
346
347 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
348 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
349 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
350 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
351 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
352 amdgpu_ring_write(ring, ref_and_mask); /* reference */
353 amdgpu_ring_write(ring, ref_and_mask); /* mask */
354 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
355 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
356 }
357}
358
359/**
360 * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring
361 *
362 * @ring: amdgpu ring pointer
363 * @addr: address
364 * @seq: sequence number
365 * @flags: fence related flags
366 *
367 * Add a DMA fence packet to the ring to write
368 * the fence seq number and DMA trap packet to generate
369 * an interrupt if needed.
370 */
371static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
372 unsigned flags)
373{
374 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
375 /* write the fence */
376 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
377 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
378 /* zero in first two bits */
379 BUG_ON(addr & 0x3);
380 amdgpu_ring_write(ring, lower_32_bits(addr));
381 amdgpu_ring_write(ring, upper_32_bits(addr));
382 amdgpu_ring_write(ring, lower_32_bits(seq));
383
384 /* optionally write high bits as well */
385 if (write64bit) {
386 addr += 4;
387 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
388 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
389 /* zero in first two bits */
390 BUG_ON(addr & 0x3);
391 amdgpu_ring_write(ring, lower_32_bits(addr));
392 amdgpu_ring_write(ring, upper_32_bits(addr));
393 amdgpu_ring_write(ring, upper_32_bits(seq));
394 }
395
396 if ((flags & AMDGPU_FENCE_FLAG_INT)) {
397 uint32_t ctx = ring->is_mes_queue ?
398 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
399 /* generate an interrupt */
400 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
401 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
402 }
403}
404
405
406/**
407 * sdma_v5_2_gfx_stop - stop the gfx async dma engines
408 *
409 * @adev: amdgpu_device pointer
410 *
411 * Stop the gfx async dma ring buffers.
412 */
413static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
414{
415 u32 rb_cntl, ib_cntl;
416 int i;
417
418 for (i = 0; i < adev->sdma.num_instances; i++) {
419 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
420 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
421 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
422 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
423 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
424 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
425 }
426}
427
428/**
429 * sdma_v5_2_rlc_stop - stop the compute async dma engines
430 *
431 * @adev: amdgpu_device pointer
432 *
433 * Stop the compute async dma queues.
434 */
435static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
436{
437 /* XXX todo */
438}
439
440/**
441 * sdma_v5_2_ctx_switch_enable - enable/disable the async dma engines context switch
442 *
443 * @adev: amdgpu_device pointer
444 * @enable: enable/disable the DMA MEs context switch.
445 *
446 * Halt or unhalt the async dma engines context switch.
447 */
448static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
449{
450 u32 f32_cntl, phase_quantum = 0;
451 int i;
452
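/* Encode the requested quantum as (value, unit): value is halved
 * (rounding up) and unit incremented until value fits the register
 * field, so the programmed quantum is roughly value << unit; if it
 * is still too large it gets clamped to the field maximum below.
 */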
453 if (amdgpu_sdma_phase_quantum) {
454 unsigned value = amdgpu_sdma_phase_quantum;
455 unsigned unit = 0;
456
457 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
458 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
459 value = (value + 1) >> 1;
460 unit++;
461 }
462 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
463 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
464 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
465 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
466 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
467 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
468 WARN_ONCE(1,
469 "clamping sdma_phase_quantum to %uK clock cycles\n",
470 value << unit);
471 }
472 phase_quantum =
473 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
474 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
475 }
476
477 for (i = 0; i < adev->sdma.num_instances; i++) {
478 if (enable && amdgpu_sdma_phase_quantum) {
479 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
480 phase_quantum);
481 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
482 phase_quantum);
483 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
484 phase_quantum);
485 }
486
487 if (!amdgpu_sriov_vf(adev)) {
488 f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
489 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
490 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
491 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
492 }
493 }
494
495}
496
497/**
498 * sdma_v5_2_enable - halt or unhalt the async dma engines
499 *
500 * @adev: amdgpu_device pointer
501 * @enable: enable/disable the DMA MEs.
502 *
503 * Halt or unhalt the async dma engines.
504 */
505static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
506{
507 u32 f32_cntl;
508 int i;
509
510 if (!enable) {
511 sdma_v5_2_gfx_stop(adev);
512 sdma_v5_2_rlc_stop(adev);
513 }
514
515 if (!amdgpu_sriov_vf(adev)) {
516 for (i = 0; i < adev->sdma.num_instances; i++) {
517 f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
518 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
519 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
520 }
521 }
522}
523
524/**
525 * sdma_v5_2_gfx_resume_instance - start/restart a certain sdma engine
526 *
527 * @adev: amdgpu_device pointer
528 * @i: instance
529 * @restore: whether to restore the saved wptr when restarting
530 *
531 * Set up the gfx DMA ring buffers and enable them. On restart, we will restore wptr and rptr.
532 * Return 0 for success.
533 */
534
535static int sdma_v5_2_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
536{
537 struct amdgpu_ring *ring;
538 u32 rb_cntl, ib_cntl;
539 u32 rb_bufsz;
540 u32 doorbell;
541 u32 doorbell_offset;
542 u32 temp;
543 u32 wptr_poll_cntl;
544 u64 wptr_gpu_addr;
545
546 ring = &adev->sdma.instance[i].ring;
547
548 if (!amdgpu_sriov_vf(adev))
549 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
550
551 /* Set ring buffer size in dwords */
552 rb_bufsz = order_base_2(ring->ring_size / 4);
553 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
554 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
555#ifdef __BIG_ENDIAN
556 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
557 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
558 RPTR_WRITEBACK_SWAP_ENABLE, 1);
559#endif
560 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
561
562 /* Initialize the ring buffer's read and write pointers */
563 if (restore) {
564 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
565 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
566 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
567 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
568 } else {
569 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
570 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
571 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
572 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
573 }
574
575 /* setup the wptr shadow polling */
576 wptr_gpu_addr = ring->wptr_gpu_addr;
577 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
578 lower_32_bits(wptr_gpu_addr));
579 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
580 upper_32_bits(wptr_gpu_addr));
581 wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
582 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
583 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
584 SDMA0_GFX_RB_WPTR_POLL_CNTL,
585 F32_POLL_ENABLE, 1);
586 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
587 wptr_poll_cntl);
588
589 /* set the wb address whether it's enabled or not */
590 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
591 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
592 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
593 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
594
595 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
596
597 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
598 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
599
600 if (!restore)
601 ring->wptr = 0;
602
603 /* before programming wptr to a smaller value, set minor_ptr_update first */
604 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
605
606 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
607 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
608 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
609 }
610
611 doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
612 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
613
614 if (ring->use_doorbell) {
615 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
616 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
617 OFFSET, ring->doorbell_index);
618 } else {
619 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
620 }
621 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
622 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
623
624 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
625 ring->doorbell_index,
626 adev->doorbell_index.sdma_doorbell_range);
627
628 if (amdgpu_sriov_vf(adev))
629 sdma_v5_2_ring_set_wptr(ring);
630
631 /* set minor_ptr_update to 0 after wptr has been programmed */
633 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
634
635 /* SRIOV VF has no control over any of the registers below */
636 if (!amdgpu_sriov_vf(adev)) {
637 /* set utc l1 enable flag always to 1 */
638 temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
639 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
640
641 /* enable MCBP */
642 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
643 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
644
645 /* Set up RESP_MODE to non-copy addresses */
646 temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
647 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
648 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
649 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
650
651 /* program default cache read and write policy */
652 temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
653 /* clean read policy and write policy bits */
654 temp &= 0xFF0FFF;
655 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
656 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
657 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
658 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
659
660 /* unhalt engine */
661 temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
662 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
663 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
664 }
665
666 /* enable DMA RB */
667 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
668 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
669
670 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
671 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
672#ifdef __BIG_ENDIAN
673 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
674#endif
675 /* enable DMA IBs */
676 WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
677
678 if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
679 sdma_v5_2_ctx_switch_enable(adev, true);
680 sdma_v5_2_enable(adev, true);
681 }
682
683 return amdgpu_ring_test_helper(ring);
684}
685
686/**
687 * sdma_v5_2_gfx_resume - setup and start the async dma engines
688 *
689 * @adev: amdgpu_device pointer
690 *
691 * Set up the gfx DMA ring buffers and enable them.
692 * Returns 0 for success, error for failure.
693 */
694static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
695{
696 int i, r;
697
698 for (i = 0; i < adev->sdma.num_instances; i++) {
699 r = sdma_v5_2_gfx_resume_instance(adev, i, false);
700 if (r)
701 return r;
702 }
703
704 return 0;
705}
706
707/**
708 * sdma_v5_2_rlc_resume - setup and start the async dma engines
709 *
710 * @adev: amdgpu_device pointer
711 *
712 * Set up the compute DMA queues and enable them.
713 * Returns 0 for success, error for failure.
714 */
715static int sdma_v5_2_rlc_resume(struct amdgpu_device *adev)
716{
717 return 0;
718}
719
720/**
721 * sdma_v5_2_load_microcode - load the sDMA ME ucode
722 *
723 * @adev: amdgpu_device pointer
724 *
725 * Loads the sDMA0/1/2/3 ucode.
726 * Returns 0 for success, -EINVAL if the ucode is not available.
727 */
728static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
729{
730 const struct sdma_firmware_header_v1_0 *hdr;
731 const __le32 *fw_data;
732 u32 fw_size;
733 int i, j;
734
735 /* halt the MEs */
736 sdma_v5_2_enable(adev, false);
737
738 for (i = 0; i < adev->sdma.num_instances; i++) {
739 if (!adev->sdma.instance[i].fw)
740 return -EINVAL;
741
742 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
743 amdgpu_ucode_print_sdma_hdr(&hdr->header);
744 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
745
746 fw_data = (const __le32 *)
747 (adev->sdma.instance[i].fw->data +
748 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
749
750 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
751
752 for (j = 0; j < fw_size; j++) {
753 if (amdgpu_emu_mode == 1 && j % 500 == 0)
754 msleep(1);
755 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
756 }
757
758 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
759 }
760
761 return 0;
762}
763
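/*
 * sdma_v5_2_soft_reset - soft reset each SDMA instance
 *
 * Pulses the per-instance SOFT_RESET_SDMAn bit in GRBM_SOFT_RESET,
 * waiting 50us with the bit asserted and another 50us after release.
 */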
764static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
765{
766 struct amdgpu_device *adev = ip_block->adev;
767 u32 grbm_soft_reset;
768 u32 tmp;
769 int i;
770
771 for (i = 0; i < adev->sdma.num_instances; i++) {
772 grbm_soft_reset = REG_SET_FIELD(0,
773 GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
774 1);
775 grbm_soft_reset <<= i;
776
777 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
778 tmp |= grbm_soft_reset;
779 DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
780 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
781 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
782
783 udelay(50);
784
785 tmp &= ~grbm_soft_reset;
786 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
787 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
788
789 udelay(50);
790 }
791
792 return 0;
793}
794
795/**
796 * sdma_v5_2_start - setup and start the async dma engines
797 *
798 * @adev: amdgpu_device pointer
799 *
800 * Set up the DMA engines and enable them.
801 * Returns 0 for success, error for failure.
802 */
803static int sdma_v5_2_start(struct amdgpu_device *adev)
804{
805 int r = 0;
806 struct amdgpu_ip_block *ip_block;
807
808 if (amdgpu_sriov_vf(adev)) {
809 sdma_v5_2_ctx_switch_enable(adev, false);
810 sdma_v5_2_enable(adev, false);
811
812 /* set RB registers */
813 r = sdma_v5_2_gfx_resume(adev);
814 return r;
815 }
816
817 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
818 r = sdma_v5_2_load_microcode(adev);
819 if (r)
820 return r;
821
822 /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
823 if (amdgpu_emu_mode == 1)
824 msleep(1000);
825 }
826
827 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA);
828 if (!ip_block)
829 return -EINVAL;
830
831 sdma_v5_2_soft_reset(ip_block);
832 /* unhalt the MEs */
833 sdma_v5_2_enable(adev, true);
834 /* enable sdma ring preemption */
835 sdma_v5_2_ctx_switch_enable(adev, true);
836
837 /* start the gfx rings and rlc compute queues */
838 r = sdma_v5_2_gfx_resume(adev);
839 if (r)
840 return r;
841 r = sdma_v5_2_rlc_resume(adev);
842
843 return r;
844}
845
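/*
 * sdma_v5_2_mqd_init - initialize a v10 SDMA memory queue descriptor
 *
 * Fills the RLC ring buffer fields of the MQD (ring size, base, rptr
 * write-back and wptr poll addresses, doorbell) from @prop; used when
 * additional queues are created through the MQD manager.
 */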
846static int sdma_v5_2_mqd_init(struct amdgpu_device *adev, void *mqd,
847 struct amdgpu_mqd_prop *prop)
848{
849 struct v10_sdma_mqd *m = mqd;
850 uint64_t wb_gpu_addr;
851
852 m->sdmax_rlcx_rb_cntl =
853 order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
854 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
855 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
856 1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
857
858 m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
859 m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
860
861 m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
862 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
863
864 wb_gpu_addr = prop->wptr_gpu_addr;
865 m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
866 m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
867
868 wb_gpu_addr = prop->rptr_gpu_addr;
869 m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
870 m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
871
872 m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
873 mmSDMA0_GFX_IB_CNTL));
874
875 m->sdmax_rlcx_doorbell_offset =
876 prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
877
878 m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
879
880 return 0;
881}
882
883static void sdma_v5_2_set_mqd_funcs(struct amdgpu_device *adev)
884{
885 adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
886 adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_2_mqd_init;
887}
888
889/**
890 * sdma_v5_2_ring_test_ring - simple async dma engine test
891 *
892 * @ring: amdgpu_ring structure holding ring information
893 *
894 * Test the DMA engine by using it to write a value
895 * to memory and reading it back.
896 * Returns 0 for success, error for failure.
897 */
898static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
899{
900 struct amdgpu_device *adev = ring->adev;
901 unsigned i;
902 unsigned index;
903 int r;
904 u32 tmp;
905 u64 gpu_addr;
906 volatile uint32_t *cpu_ptr = NULL;
907
908 tmp = 0xCAFEDEAD;
909
910 if (ring->is_mes_queue) {
911 uint32_t offset = 0;
912 offset = amdgpu_mes_ctx_get_offs(ring,
913 AMDGPU_MES_CTX_PADDING_OFFS);
914 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
915 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
916 *cpu_ptr = tmp;
917 } else {
918 r = amdgpu_device_wb_get(adev, &index);
919 if (r) {
920 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
921 return r;
922 }
923
924 gpu_addr = adev->wb.gpu_addr + (index * 4);
925 adev->wb.wb[index] = cpu_to_le32(tmp);
926 }
927
928 r = amdgpu_ring_alloc(ring, 20);
929 if (r) {
930 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
931 if (!ring->is_mes_queue)
932 amdgpu_device_wb_free(adev, index);
933 return r;
934 }
935
936 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
937 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
938 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
939 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
940 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
941 amdgpu_ring_write(ring, 0xDEADBEEF);
942 amdgpu_ring_commit(ring);
943
944 for (i = 0; i < adev->usec_timeout; i++) {
945 if (ring->is_mes_queue)
946 tmp = le32_to_cpu(*cpu_ptr);
947 else
948 tmp = le32_to_cpu(adev->wb.wb[index]);
949 if (tmp == 0xDEADBEEF)
950 break;
951 if (amdgpu_emu_mode == 1)
952 msleep(1);
953 else
954 udelay(1);
955 }
956
957 if (i >= adev->usec_timeout)
958 r = -ETIMEDOUT;
959
960 if (!ring->is_mes_queue)
961 amdgpu_device_wb_free(adev, index);
962
963 return r;
964}
965
966/**
967 * sdma_v5_2_ring_test_ib - test an IB on the DMA engine
968 *
969 * @ring: amdgpu_ring structure holding ring information
970 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
971 *
972 * Test a simple IB in the DMA ring.
973 * Returns 0 on success, error on failure.
974 */
975static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
976{
977 struct amdgpu_device *adev = ring->adev;
978 struct amdgpu_ib ib;
979 struct dma_fence *f = NULL;
980 unsigned index;
981 long r;
982 u32 tmp = 0;
983 u64 gpu_addr;
984 volatile uint32_t *cpu_ptr = NULL;
985
986 tmp = 0xCAFEDEAD;
987 memset(&ib, 0, sizeof(ib));
988
989 if (ring->is_mes_queue) {
990 uint32_t offset = 0;
991 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
992 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
993 ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
994
995 offset = amdgpu_mes_ctx_get_offs(ring,
996 AMDGPU_MES_CTX_PADDING_OFFS);
997 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
998 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
999 *cpu_ptr = tmp;
1000 } else {
1001 r = amdgpu_device_wb_get(adev, &index);
1002 if (r) {
1003 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
1004 return r;
1005 }
1006
1007 gpu_addr = adev->wb.gpu_addr + (index * 4);
1008 adev->wb.wb[index] = cpu_to_le32(tmp);
1009
1010 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
1011 if (r) {
1012 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
1013 goto err0;
1014 }
1015 }
1016
1017 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1018 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1019 ib.ptr[1] = lower_32_bits(gpu_addr);
1020 ib.ptr[2] = upper_32_bits(gpu_addr);
1021 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1022 ib.ptr[4] = 0xDEADBEEF;
1023 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1024 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1025 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1026 ib.length_dw = 8;
1027
1028 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1029 if (r)
1030 goto err1;
1031
1032 r = dma_fence_wait_timeout(f, false, timeout);
1033 if (r == 0) {
1034 DRM_ERROR("amdgpu: IB test timed out\n");
1035 r = -ETIMEDOUT;
1036 goto err1;
1037 } else if (r < 0) {
1038 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
1039 goto err1;
1040 }
1041
1042 if (ring->is_mes_queue)
1043 tmp = le32_to_cpu(*cpu_ptr);
1044 else
1045 tmp = le32_to_cpu(adev->wb.wb[index]);
1046
1047 if (tmp == 0xDEADBEEF)
1048 r = 0;
1049 else
1050 r = -EINVAL;
1051
1052err1:
1053 amdgpu_ib_free(adev, &ib, NULL);
1054 dma_fence_put(f);
1055err0:
1056 if (!ring->is_mes_queue)
1057 amdgpu_device_wb_free(adev, index);
1058 return r;
1059}
1060
1061
1062/**
1063 * sdma_v5_2_vm_copy_pte - update PTEs by copying them from the GART
1064 *
1065 * @ib: indirect buffer to fill with commands
1066 * @pe: addr of the page entry
1067 * @src: src addr to copy from
1068 * @count: number of page entries to update
1069 *
1070 * Update PTEs by copying them from the GART using sDMA.
1071 */
1072static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
1073 uint64_t pe, uint64_t src,
1074 unsigned count)
1075{
1076 unsigned bytes = count * 8;
1077
1078 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1079 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1080 ib->ptr[ib->length_dw++] = bytes - 1;
1081 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1082 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1083 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1084 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1085 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1086
1087}
1088
1089/**
1090 * sdma_v5_2_vm_write_pte - update PTEs by writing them manually
1091 *
1092 * @ib: indirect buffer to fill with commands
1093 * @pe: addr of the page entry
1094 * @value: dst addr to write into pe
1095 * @count: number of page entries to update
1096 * @incr: increase next addr by incr bytes
1097 *
1098 * Update PTEs by writing them manually using sDMA.
1099 */
1100static void sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1101 uint64_t value, unsigned count,
1102 uint32_t incr)
1103{
1104 unsigned ndw = count * 2;
1105
1106 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1107 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1108 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1109 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1110 ib->ptr[ib->length_dw++] = ndw - 1;
1111 for (; ndw > 0; ndw -= 2) {
1112 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1113 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1114 value += incr;
1115 }
1116}
1117
1118/**
1119 * sdma_v5_2_vm_set_pte_pde - update the page tables using sDMA
1120 *
1121 * @ib: indirect buffer to fill with commands
1122 * @pe: addr of the page entry
1123 * @addr: dst addr to write into pe
1124 * @count: number of page entries to update
1125 * @incr: increase next addr by incr bytes
1126 * @flags: access flags
1127 *
1128 * Update the page tables using sDMA.
1129 */
1130static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
1131 uint64_t pe,
1132 uint64_t addr, unsigned count,
1133 uint32_t incr, uint64_t flags)
1134{
1135 /* for physically contiguous pages (vram) */
1136 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1137 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1138 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1139 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1140 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1141 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1142 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1143 ib->ptr[ib->length_dw++] = incr; /* increment size */
1144 ib->ptr[ib->length_dw++] = 0;
1145 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1146}
1147
1148/**
1149 * sdma_v5_2_ring_pad_ib - pad the IB
1150 *
1151 * @ib: indirect buffer to fill with padding
1152 * @ring: amdgpu_ring structure holding ring information
1153 *
1154 * Pad the IB with NOPs so that its size is a multiple of 8 dwords.
1155 */
1156static void sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1157{
1158 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1159 u32 pad_count;
1160 int i;
1161
1162 pad_count = (-ib->length_dw) & 0x7;
1163 for (i = 0; i < pad_count; i++)
1164 if (sdma && sdma->burst_nop && (i == 0))
1165 ib->ptr[ib->length_dw++] =
1166 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1167 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1168 else
1169 ib->ptr[ib->length_dw++] =
1170 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1171}
1172
1173
1174/**
1175 * sdma_v5_2_ring_emit_pipeline_sync - sync the pipeline
1176 *
1177 * @ring: amdgpu_ring pointer
1178 *
1179 * Make sure all previous operations are completed.
1180 */
1181static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1182{
1183 uint32_t seq = ring->fence_drv.sync_seq;
1184 uint64_t addr = ring->fence_drv.gpu_addr;
1185
1186 /* wait for idle */
1187 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1188 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1189 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1190 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1191 amdgpu_ring_write(ring, addr & 0xfffffffc);
1192 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1193 amdgpu_ring_write(ring, seq); /* reference */
1194 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1195 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1196 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1197}
1198
1199
1200/**
1201 * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA
1202 *
1203 * @ring: amdgpu_ring pointer
1204 * @vmid: vmid number to use
1205 * @pd_addr: address
1206 *
1207 * Update the page table base and flush the VM TLB
1208 * using sDMA.
1209 */
1210static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
1211 unsigned vmid, uint64_t pd_addr)
1212{
1213 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1214 uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
1215
1216 /* Update the PD address for this VMID. */
1217 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
1218 (hub->ctx_addr_distance * vmid),
1219 lower_32_bits(pd_addr));
1220 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
1221 (hub->ctx_addr_distance * vmid),
1222 upper_32_bits(pd_addr));
1223
1224 /* Trigger invalidation. */
1225 amdgpu_ring_write(ring,
1226 SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1227 SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
1228 SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) |
1229 SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f));
1230 amdgpu_ring_write(ring, req);
1231 amdgpu_ring_write(ring, 0xFFFFFFFF);
1232 amdgpu_ring_write(ring,
1233 SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) |
1234 SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F));
1235}
1236
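/*
 * sdma_v5_2_ring_emit_wreg - write a register from the ring
 *
 * Uses an SRBM_WRITE packet with all byte enables set to write @val
 * to register @reg.
 */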
1237static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
1238 uint32_t reg, uint32_t val)
1239{
1240 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1241 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1242 amdgpu_ring_write(ring, reg);
1243 amdgpu_ring_write(ring, val);
1244}
1245
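/*
 * sdma_v5_2_ring_emit_reg_wait - wait on a register value from the ring
 *
 * Emits a POLL_REGMEM packet that polls @reg until the masked value
 * equals @val, retrying up to 0xfff times at the programmed poll interval.
 */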
1246static void sdma_v5_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1247 uint32_t val, uint32_t mask)
1248{
1249 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1250 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1251 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1252 amdgpu_ring_write(ring, reg << 2);
1253 amdgpu_ring_write(ring, 0);
1254 amdgpu_ring_write(ring, val); /* reference */
1255 amdgpu_ring_write(ring, mask); /* mask */
1256 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1257 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1258}
1259
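/*
 * sdma_v5_2_ring_emit_reg_write_reg_wait - write one register, wait on another
 *
 * Writes @ref to @reg0 and then waits until all bits in @mask are set in
 * @reg1; the extra zero-wait on @reg0 gives the invalidation ack a cycle
 * to reset, as noted below.
 */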
1260static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1261 uint32_t reg0, uint32_t reg1,
1262 uint32_t ref, uint32_t mask)
1263{
1264 amdgpu_ring_emit_wreg(ring, reg0, ref);
1265 /* wait for a cycle to reset vm_inv_eng*_ack */
1266 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1267 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1268}
1269
1270static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
1271{
1272 struct amdgpu_device *adev = ip_block->adev;
1273 int r;
1274
1275 r = amdgpu_sdma_init_microcode(adev, 0, true);
1276 if (r)
1277 return r;
1278
1279 sdma_v5_2_set_ring_funcs(adev);
1280 sdma_v5_2_set_buffer_funcs(adev);
1281 sdma_v5_2_set_vm_pte_funcs(adev);
1282 sdma_v5_2_set_irq_funcs(adev);
1283 sdma_v5_2_set_mqd_funcs(adev);
1284
1285 return 0;
1286}
1287
1288static unsigned sdma_v5_2_seq_to_irq_id(int seq_num)
1289{
1290 switch (seq_num) {
1291 case 0:
1292 return SOC15_IH_CLIENTID_SDMA0;
1293 case 1:
1294 return SOC15_IH_CLIENTID_SDMA1;
1295 case 2:
1296 return SOC15_IH_CLIENTID_SDMA2;
1297 case 3:
1298 return SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid;
1299 default:
1300 break;
1301 }
1302 return -EINVAL;
1303}
1304
1305static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
1306{
1307 switch (seq_num) {
1308 case 0:
1309 return SDMA0_5_0__SRCID__SDMA_TRAP;
1310 case 1:
1311 return SDMA1_5_0__SRCID__SDMA_TRAP;
1312 case 2:
1313 return SDMA2_5_0__SRCID__SDMA_TRAP;
1314 case 3:
1315 return SDMA3_5_0__SRCID__SDMA_TRAP;
1316 default:
1317 break;
1318 }
1319 return -EINVAL;
1320}
1321
1322static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
1323{
1324 struct amdgpu_ring *ring;
1325 int r, i;
1326 struct amdgpu_device *adev = ip_block->adev;
1327 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
1328 uint32_t *ptr;
1329
1330 /* SDMA trap event */
1331 for (i = 0; i < adev->sdma.num_instances; i++) {
1332 r = amdgpu_irq_add_id(adev, sdma_v5_2_seq_to_irq_id(i),
1333 sdma_v5_2_seq_to_trap_id(i),
1334 &adev->sdma.trap_irq);
1335 if (r)
1336 return r;
1337 }
1338
1339 for (i = 0; i < adev->sdma.num_instances; i++) {
1340 ring = &adev->sdma.instance[i].ring;
1341 ring->ring_obj = NULL;
1342 ring->use_doorbell = true;
1343 ring->me = i;
1344
1345 DRM_INFO("use_doorbell being set to: [%s]\n",
1346 ring->use_doorbell?"true":"false");
1347
1348 ring->doorbell_index =
1349 (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
1350
1351 ring->vm_hub = AMDGPU_GFXHUB(0);
1352 sprintf(ring->name, "sdma%d", i);
1353 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1354 AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1355 AMDGPU_RING_PRIO_DEFAULT, NULL);
1356 if (r)
1357 return r;
1358 }
1359
1360 adev->sdma.supported_reset =
1361 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
1362 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1363 case IP_VERSION(5, 2, 0):
1364 case IP_VERSION(5, 2, 2):
1365 case IP_VERSION(5, 2, 3):
1366 case IP_VERSION(5, 2, 4):
1367 if (adev->sdma.instance[0].fw_version >= 76)
1368 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1369 break;
1370 case IP_VERSION(5, 2, 5):
1371 if (adev->sdma.instance[0].fw_version >= 34)
1372 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1373 break;
1374 default:
1375 break;
1376 }
1377
1378 /* Allocate memory for SDMA IP Dump buffer */
1379 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
1380 if (ptr)
1381 adev->sdma.ip_dump = ptr;
1382 else
1383 DRM_ERROR("Failed to allocate memory for SDMA IP dump\n");
1384
1385 r = amdgpu_sdma_sysfs_reset_mask_init(adev);
1386 if (r)
1387 return r;
1388
1389 return r;
1390}
1391
1392static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block)
1393{
1394 struct amdgpu_device *adev = ip_block->adev;
1395 int i;
1396
1397 for (i = 0; i < adev->sdma.num_instances; i++)
1398 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1399
1400 amdgpu_sdma_sysfs_reset_mask_fini(adev);
1401 amdgpu_sdma_destroy_inst_ctx(adev, true);
1402
1403 kfree(adev->sdma.ip_dump);
1404
1405 return 0;
1406}
1407
1408static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block)
1409{
1410 struct amdgpu_device *adev = ip_block->adev;
1411
1412 return sdma_v5_2_start(adev);
1413}
1414
1415static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block)
1416{
1417 struct amdgpu_device *adev = ip_block->adev;
1418
1419 if (amdgpu_sriov_vf(adev))
1420 return 0;
1421
1422 sdma_v5_2_ctx_switch_enable(adev, false);
1423 sdma_v5_2_enable(adev, false);
1424
1425 return 0;
1426}
1427
1428static int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block)
1429{
1430 return sdma_v5_2_hw_fini(ip_block);
1431}
1432
1433static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
1434{
1435 return sdma_v5_2_hw_init(ip_block);
1436}
1437
1438static bool sdma_v5_2_is_idle(void *handle)
1439{
1440 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1441 u32 i;
1442
1443 for (i = 0; i < adev->sdma.num_instances; i++) {
1444 u32 tmp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1445
1446 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1447 return false;
1448 }
1449
1450 return true;
1451}
1452
1453static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
1454{
1455 unsigned i;
1456 u32 sdma0, sdma1, sdma2, sdma3;
1457 struct amdgpu_device *adev = ip_block->adev;
1458
1459 for (i = 0; i < adev->usec_timeout; i++) {
1460 sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1461 sdma1 = RREG32(sdma_v5_2_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1462 sdma2 = RREG32(sdma_v5_2_get_reg_offset(adev, 2, mmSDMA0_STATUS_REG));
1463 sdma3 = RREG32(sdma_v5_2_get_reg_offset(adev, 3, mmSDMA0_STATUS_REG));
1464
1465 if (sdma0 & sdma1 & sdma2 & sdma3 & SDMA0_STATUS_REG__IDLE_MASK)
1466 return 0;
1467 udelay(1);
1468 }
1469 return -ETIMEDOUT;
1470}
1471
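/*
 * sdma_v5_2_reset_queue - reset a single SDMA gfx queue
 *
 * Stops the ring's IB and RB, freezes and halts the engine, waits for it
 * to report frozen (or at least idle), pulses the instance's GRBM soft
 * reset bit, then unfreezes the engine and restores the ring state via
 * sdma_v5_2_gfx_resume_instance().  Not supported under SR-IOV.
 */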
1472static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
1473{
1474 struct amdgpu_device *adev = ring->adev;
1475 int i, j, r;
1476 u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
1477
1478 if (amdgpu_sriov_vf(adev))
1479 return -EINVAL;
1480
1481 for (i = 0; i < adev->sdma.num_instances; i++) {
1482 if (ring == &adev->sdma.instance[i].ring)
1483 break;
1484 }
1485
1486 if (i == adev->sdma.num_instances) {
1487 DRM_ERROR("sdma instance not found\n");
1488 return -EINVAL;
1489 }
1490
1491 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
1492
1493 /* stop queue */
1494 ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
1495 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
1496 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
1497
1498 rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
1499 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
1500 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
1501
1502 /* engine stop: set SDMAx_F32_CNTL.HALT to 1 and the SDMAx_FREEZE freeze bit to 1 */
1503 freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1504 freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
1505 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
1506
1507 for (j = 0; j < adev->usec_timeout; j++) {
1508 freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1509
1510 if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
1511 break;
1512 udelay(1);
1513 }
1514
1515
1516 if (j == adev->usec_timeout) {
1517 stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
1518 if ((stat1_reg & 0x3FF) != 0x3FF) {
1519 DRM_ERROR("cannot soft reset as sdma not idle\n");
1520 r = -ETIMEDOUT;
1521 goto err0;
1522 }
1523 }
1524
1525 f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
1526 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
1527 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
1528
1529 cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
1530 cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
1531 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
1532
1533 /* soft reset: set SDMA0_GFX_PREEMPT.IB_PREEMPT = 0, then pulse mmGRBM_SOFT_RESET.SOFT_RESET_SDMAn = 1 */
1534 preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
1535 preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
1536 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
1537
1538 soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
1539 soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
1540
1541
1542 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
1543
1544 udelay(50);
1545
1546 soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
1547
1548 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
1549
1550 /* unfreeze and unhalt */
1551 freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1552 freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
1553 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
1554
1555 r = sdma_v5_2_gfx_resume_instance(adev, i, true);
1556
1557err0:
1558 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
1559 return r;
1560}
1561
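/*
 * sdma_v5_2_ring_preempt_ib - preempt the currently executing IB
 *
 * Clears the cond_exec condition, emits a trailing fence, asserts
 * SDMA0_GFX_PREEMPT to request mid-command-buffer preemption, and polls
 * the trailing fence until the engine acknowledges the preemption.
 */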
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	sdma_gfx_preempt =
		sdma_v5_2_get_reg_offset(adev, index, mmSDMA0_GFX_PREEMPT);

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v5_2_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

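/**
 * sdma_v5_2_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu device pointer
 * @source: irq source
 * @type: SDMA instance the state applies to
 * @state: requested interrupt state
 *
 * Toggles the TRAP_ENABLE field of SDMA0_CNTL for the given instance.
 * No register access is performed under SR-IOV.
 */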
static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;
	u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);

	if (!amdgpu_sriov_vf(adev)) {
		sdma_cntl = RREG32(reg_offset);
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
					  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		WREG32(reg_offset, sdma_cntl);
	}

	return 0;
}

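/**
 * sdma_v5_2_process_trap_irq - handle an SDMA trap interrupt
 *
 * @adev: amdgpu device pointer
 * @source: irq source
 * @entry: IV ring entry for the interrupt
 *
 * For MES-managed queues, look up the ring by queue id and process its
 * fences; otherwise map the client id and ring id to the SDMA instance
 * and process fences on its GFX ring.
 */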
static int sdma_v5_2_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: SDMA trap\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
		return 0;
	}

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA2:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[2].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[3].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

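/**
 * sdma_v5_2_process_illegal_inst_irq - handle an SDMA illegal instruction irq
 *
 * @adev: amdgpu device pointer
 * @source: irq source
 * @entry: IV ring entry for the interrupt
 *
 * Currently a no-op placeholder; the interrupt is consumed without
 * further handling.
 */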
static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

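/**
 * sdma_v5_2_firmware_mgcg_support - check firmware support for MGCG
 *
 * @adev: amdgpu device pointer
 * @i: SDMA instance to check
 *
 * Returns false when the loaded firmware is older than the minimum
 * version required for medium grain clock gating (70 for SDMA 5.2.1,
 * 47 for 5.2.3, 9 for 5.2.7), true otherwise.
 */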
static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,
					    int i)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(5, 2, 1):
		if (adev->sdma.instance[i].fw_version < 70)
			return false;
		break;
	case IP_VERSION(5, 2, 3):
		if (adev->sdma.instance[i].fw_version < 47)
			return false;
		break;
	case IP_VERSION(5, 2, 7):
		if (adev->sdma.instance[i].fw_version < 9)
			return false;
		break;
	default:
		return true;
	}

	return true;
}

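/**
 * sdma_v5_2_update_medium_grain_clock_gating - toggle SDMA medium grain clock gating
 *
 * @adev: amdgpu device pointer
 * @enable: enable or disable the feature
 *
 * Clears the SOFT_OVERRIDE bits in SDMA0_CLK_CTRL to let the clocks gate
 * when enabling, or sets them to force the clocks on when disabling.
 * Instances whose firmware lacks MGCG support drop the MGCG flag first.
 */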
static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!sdma_v5_2_firmware_mgcg_support(adev, i))
			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

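/**
 * sdma_v5_2_update_medium_grain_light_sleep - toggle SDMA memory light sleep
 *
 * @adev: amdgpu device pointer
 * @enable: enable or disable the feature
 *
 * Sets or clears MEM_POWER_OVERRIDE in SDMA0_POWER_CNTL per instance.
 * On SDMA 5.2.1 with firmware older than version 70 the LS flag is
 * dropped and the feature stays disabled.
 */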
static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.instance[i].fw_version < 70 &&
		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
		    IP_VERSION(5, 2, 1))
			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
		}
	}
}

static int sdma_v5_2_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 7):
		sdma_v5_2_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_2_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_2_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__CGCG_EN_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32_KIQ(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
	 * disallow GFXOFF in some cases leading to
	 * hangs in SDMA. Disallow GFXOFF while SDMA is active.
	 * We can probably just limit this to 5.2.3,
	 * but it shouldn't hurt for other parts since
	 * this GFXOFF will be disallowed anyway when SDMA is
	 * active, this just makes it explicit.
	 * sdma_v5_2_ring_set_wptr() takes advantage of this
	 * to update the wptr because sometimes SDMA seems to miss
	 * doorbells when entering PG. If you remove this, update
	 * sdma_v5_2_ring_set_wptr() as well!
	 */
	amdgpu_gfx_off_ctrl(adev, false);
}

static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
	 * disallow GFXOFF in some cases leading to
	 * hangs in SDMA. Allow GFXOFF when SDMA is complete.
	 */
	amdgpu_gfx_off_ctrl(adev, true);
}

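/**
 * sdma_v5_2_print_ip_state - print the captured SDMA register state
 *
 * @ip_block: amdgpu IP block pointer
 * @p: drm printer to write to
 *
 * Prints the register snapshot taken by sdma_v5_2_dump_ip_state() for
 * every SDMA instance, one register per line.
 */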
static void sdma_v5_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
	uint32_t instance_offset;

	if (!adev->sdma.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		instance_offset = i * reg_count;
		drm_printf(p, "\nInstance:%d\n", i);

		for (j = 0; j < reg_count; j++)
			drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_2[j].reg_name,
				   adev->sdma.ip_dump[instance_offset + j]);
	}
}

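/**
 * sdma_v5_2_dump_ip_state - capture SDMA register state for debugging
 *
 * @ip_block: amdgpu IP block pointer
 *
 * Reads the registers in sdma_reg_list_5_2 for each SDMA instance into
 * the ip_dump buffer, keeping GFXOFF disallowed while the MMIO reads run.
 */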
static void sdma_v5_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t instance_offset;
	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);

	if (!adev->sdma.ip_dump)
		return;

	amdgpu_gfx_off_ctrl(adev, false);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		instance_offset = i * reg_count;
		for (j = 0; j < reg_count; j++)
			adev->sdma.ip_dump[instance_offset + j] =
				RREG32(sdma_v5_2_get_reg_offset(adev, i,
						sdma_reg_list_5_2[j].reg_offset));
	}
	amdgpu_gfx_off_ctrl(adev, true);
}

static const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
	.name = "sdma_v5_2",
	.early_init = sdma_v5_2_early_init,
	.sw_init = sdma_v5_2_sw_init,
	.sw_fini = sdma_v5_2_sw_fini,
	.hw_init = sdma_v5_2_hw_init,
	.hw_fini = sdma_v5_2_hw_fini,
	.suspend = sdma_v5_2_suspend,
	.resume = sdma_v5_2_resume,
	.is_idle = sdma_v5_2_is_idle,
	.wait_for_idle = sdma_v5_2_wait_for_idle,
	.soft_reset = sdma_v5_2_soft_reset,
	.set_clockgating_state = sdma_v5_2_set_clockgating_state,
	.set_powergating_state = sdma_v5_2_set_powergating_state,
	.get_clockgating_state = sdma_v5_2_get_clockgating_state,
	.dump_ip_state = sdma_v5_2_dump_ip_state,
	.print_ip_state = sdma_v5_2_print_ip_state,
};

static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = sdma_v5_2_ring_get_rptr,
	.get_wptr = sdma_v5_2_ring_get_wptr,
	.set_wptr = sdma_v5_2_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_2_ring_init_cond_exec */
		6 + /* sdma_v5_2_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_2_ring_emit_pipeline_sync */
		/* sdma_v5_2_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
	.emit_ib = sdma_v5_2_ring_emit_ib,
	.emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
	.emit_fence = sdma_v5_2_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_2_ring_emit_hdp_flush,
	.test_ring = sdma_v5_2_ring_test_ring,
	.test_ib = sdma_v5_2_ring_test_ib,
	.insert_nop = sdma_v5_2_ring_insert_nop,
	.pad_ib = sdma_v5_2_ring_pad_ib,
	.begin_use = sdma_v5_2_ring_begin_use,
	.end_use = sdma_v5_2_ring_end_use,
	.emit_wreg = sdma_v5_2_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_2_ring_init_cond_exec,
	.preempt_ib = sdma_v5_2_ring_preempt_ib,
	.reset = sdma_v5_2_reset_queue,
};

static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_2_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_2_trap_irq_funcs = {
	.set = sdma_v5_2_set_trap_irq_state,
	.process = sdma_v5_2_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_2_illegal_inst_irq_funcs = {
	.process = sdma_v5_2_process_illegal_inst_irq,
};

static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_2_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_2_illegal_inst_irq_funcs;
}

/**
 * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: copy flags for the buffers
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine.
 */
static void sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v5_2_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_2_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_2_emit_fill_buffer,
};

static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_2_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_2_vm_copy_pte,
	.write_pte = sdma_v5_2_vm_write_pte,
	.set_pte_pde = sdma_v5_2_vm_set_pte_pde,
};

static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 2,
	.rev = 0,
	.funcs = &sdma_v5_2_ip_funcs,
};