1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_1_0_offset.h"
34#include "gc/gc_10_1_0_sh_mask.h"
35#include "hdp/hdp_5_0_0_offset.h"
36#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
37#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
38
39#include "soc15_common.h"
40#include "soc15.h"
41#include "navi10_sdma_pkt_open.h"
42#include "nbio_v2_3.h"
43#include "sdma_v5_0.h"
44
45MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
46MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
47
48MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
49MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
50
51MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
52MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
53
54#define SDMA1_REG_OFFSET 0x600
55#define SDMA0_HYP_DEC_REG_START 0x5880
56#define SDMA0_HYP_DEC_REG_END 0x5893
57#define SDMA1_HYP_DEC_REG_OFFSET 0x20
58
59static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
60static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
61static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
62static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
63
64static const struct soc15_reg_golden golden_settings_sdma_5[] = {
65 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
66 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
67 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
68 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
69 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
70 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
71 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
72 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
73 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
74 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
75 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
76 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
77 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
78 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
80 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
81 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
83 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
84 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
85 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
86 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
87 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
88 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
89};
90
91static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
92 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
93 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
94};
95
96static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
97 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
98 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
99};
100
101static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
102 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
103 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
104};
105
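/*
 * Hyp-decode (HYP_DEC) SDMA registers sit in a separate aperture from the
 * regular per-instance register space, so they use a different base offset
 * and a different per-instance stride (see the #defines above).
 */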
106static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
107{
108 u32 base;
109
110 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
111 internal_offset <= SDMA0_HYP_DEC_REG_END) {
112 base = adev->reg_offset[GC_HWIP][0][1];
113 if (instance == 1)
114 internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
115 } else {
116 base = adev->reg_offset[GC_HWIP][0][0];
117 if (instance == 1)
118 internal_offset += SDMA1_REG_OFFSET;
119 }
120
121 return base + internal_offset;
122}
123
124static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
125{
126 switch (adev->asic_type) {
127 case CHIP_NAVI10:
128 soc15_program_register_sequence(adev,
129 golden_settings_sdma_5,
130 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
131 soc15_program_register_sequence(adev,
132 golden_settings_sdma_nv10,
133 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
134 break;
135 case CHIP_NAVI14:
136 soc15_program_register_sequence(adev,
137 golden_settings_sdma_5,
138 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
139 soc15_program_register_sequence(adev,
140 golden_settings_sdma_nv14,
141 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
142 break;
143 case CHIP_NAVI12:
144 soc15_program_register_sequence(adev,
145 golden_settings_sdma_5,
146 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
147 soc15_program_register_sequence(adev,
148 golden_settings_sdma_nv12,
149 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
150 break;
151 default:
152 break;
153 }
154}
155
156/**
157 * sdma_v5_0_init_microcode - load ucode images from disk
158 *
159 * @adev: amdgpu_device pointer
160 *
161 * Use the firmware interface to load the ucode images into
162 * the driver (not loaded into hw).
163 * Returns 0 on success, error on failure.
164 */
165
166// emulation only, won't work on a real chip
167// a real navi10 chip needs to use PSP to load the firmware
168static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
169{
170 const char *chip_name;
171 char fw_name[30];
172 int err = 0, i;
173 struct amdgpu_firmware_info *info = NULL;
174 const struct common_firmware_header *header = NULL;
175 const struct sdma_firmware_header_v1_0 *hdr;
176
177 DRM_DEBUG("\n");
178
179 switch (adev->asic_type) {
180 case CHIP_NAVI10:
181 chip_name = "navi10";
182 break;
183 case CHIP_NAVI14:
184 chip_name = "navi14";
185 break;
186 case CHIP_NAVI12:
187 chip_name = "navi12";
188 break;
189 default:
190 BUG();
191 }
192
193 for (i = 0; i < adev->sdma.num_instances; i++) {
194 if (i == 0)
195 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
196 else
197 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
198 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
199 if (err)
200 goto out;
201 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
202 if (err)
203 goto out;
204 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
205 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
206 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
207 if (adev->sdma.instance[i].feature_version >= 20)
208 adev->sdma.instance[i].burst_nop = true;
209 DRM_DEBUG("psp_load == '%s'\n",
210 adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
211
212 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
213 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
214 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
215 info->fw = adev->sdma.instance[i].fw;
216 header = (const struct common_firmware_header *)info->fw->data;
217 adev->firmware.fw_size +=
218 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
219 }
220 }
221out:
222 if (err) {
223 DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
224 for (i = 0; i < adev->sdma.num_instances; i++) {
225 release_firmware(adev->sdma.instance[i].fw);
226 adev->sdma.instance[i].fw = NULL;
227 }
228 }
229 return err;
230}
231
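/*
 * Conditional execution: the COND_EXE packet tells the engine to execute the
 * following N dwords only while the value at cond_exe_gpu_addr matches the
 * reference (1). N is unknown at emit time, so a dummy dword is written here
 * and filled in later by sdma_v5_0_ring_patch_cond_exec().
 */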
232static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
233{
234 unsigned ret;
235
236 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
237 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
238 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
239 amdgpu_ring_write(ring, 1);
240 ret = ring->wptr & ring->buf_mask;/* this is the offset we need to patch later */
241 amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
242
243 return ret;
244}
245
246static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
247 unsigned offset)
248{
249 unsigned cur;
250
251 BUG_ON(offset > ring->buf_mask);
252 BUG_ON(ring->ring[offset] != 0x55aa55aa);
253
254 cur = (ring->wptr - 1) & ring->buf_mask;
255 if (cur > offset)
256 ring->ring[offset] = cur - offset;
257 else
258 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
259}
260
261/**
262 * sdma_v5_0_ring_get_rptr - get the current read pointer
263 *
264 * @ring: amdgpu ring pointer
265 *
266 * Get the current rptr from the hardware (NAVI10+).
267 */
268static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
269{
270 u64 *rptr;
271
272 /* XXX check if swapping is necessary on BE */
273 rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
274
275 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
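 /* the hardware pointer is a byte offset; the ring code tracks it in dwords */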
276 return ((*rptr) >> 2);
277}
278
279/**
280 * sdma_v5_0_ring_get_wptr - get the current write pointer
281 *
282 * @ring: amdgpu ring pointer
283 *
284 * Get the current wptr from the hardware (NAVI10+).
285 */
286static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
287{
288 struct amdgpu_device *adev = ring->adev;
289 u64 *wptr = NULL;
290 uint64_t local_wptr = 0;
291
292 if (ring->use_doorbell) {
293 /* XXX check if swapping is necessary on BE */
294 wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
295 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
296 *wptr = (*wptr) >> 2;
297 DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
298 } else {
299 u32 lowbit, highbit;
300
301 wptr = &local_wptr;
302 lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
303 highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
304
305 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
306 ring->me, highbit, lowbit);
307 *wptr = highbit;
308 *wptr = (*wptr) << 32;
309 *wptr |= lowbit;
310 }
311
312 return *wptr;
313}
314
315/**
316 * sdma_v5_0_ring_set_wptr - commit the write pointer
317 *
318 * @ring: amdgpu ring pointer
319 *
320 * Write the wptr back to the hardware (NAVI10+).
321 */
322static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
323{
324 struct amdgpu_device *adev = ring->adev;
325
326 DRM_DEBUG("Setting write pointer\n");
327 if (ring->use_doorbell) {
328 DRM_DEBUG("Using doorbell -- "
329 "wptr_offs == 0x%08x "
330 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
331 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
332 ring->wptr_offs,
333 lower_32_bits(ring->wptr << 2),
334 upper_32_bits(ring->wptr << 2));
335 /* XXX check if swapping is necessary on BE */
336 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
337 adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
338 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
339 ring->doorbell_index, ring->wptr << 2);
340 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
341 } else {
342 DRM_DEBUG("Not using doorbell -- "
343 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
344 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
345 ring->me,
346 lower_32_bits(ring->wptr << 2),
347 ring->me,
348 upper_32_bits(ring->wptr << 2));
349 WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
350 lower_32_bits(ring->wptr << 2));
351 WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
352 upper_32_bits(ring->wptr << 2));
353 }
354}
355
356static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
357{
358 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
359 int i;
360
361 for (i = 0; i < count; i++)
362 if (sdma && sdma->burst_nop && (i == 0))
363 amdgpu_ring_write(ring, ring->funcs->nop |
364 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
365 else
366 amdgpu_ring_write(ring, ring->funcs->nop);
367}
368
369/**
370 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
371 *
372 * @ring: amdgpu ring pointer
373 * @ib: IB object to schedule
374 *
375 * Schedule an IB in the DMA ring (NAVI10).
376 */
377static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
378 struct amdgpu_job *job,
379 struct amdgpu_ib *ib,
380 uint32_t flags)
381{
382 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
383 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
384
385 /* IB packet must end on an 8 DW boundary */
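 /*
  * The padding below leaves wptr at an (8n + 2) dword offset; the 6-dword
  * INDIRECT_BUFFER packet that follows then ends exactly on an 8 DW boundary.
  */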
386 sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
387
388 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
389 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
390 /* base must be 32 byte aligned */
391 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
392 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
393 amdgpu_ring_write(ring, ib->length_dw);
394 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
395 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
396}
397
398/**
399 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
400 *
401 * @ring: amdgpu ring pointer
402 *
403 * Emit an hdp flush packet on the requested DMA ring.
404 */
405static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
406{
407 struct amdgpu_device *adev = ring->adev;
408 u32 ref_and_mask = 0;
409 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
410
411 if (ring->me == 0)
412 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
413 else
414 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
415
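 /*
  * POLL_REGMEM with the HDP_FLUSH bit set: the engine writes ref_and_mask to
  * the NBIO HDP flush request register and then polls the flush done register
  * until the matching bit reads back as set.
  */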
416 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
417 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
418 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
419 amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
420 amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
421 amdgpu_ring_write(ring, ref_and_mask); /* reference */
422 amdgpu_ring_write(ring, ref_and_mask); /* mask */
423 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
424 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
425}
426
427/**
428 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
429 *
430 * @ring: amdgpu ring pointer
431 * @fence: amdgpu fence object
432 *
433 * Add a DMA fence packet to the ring to write
434 * the fence seq number and DMA trap packet to generate
435 * an interrupt if needed (NAVI10).
436 */
437static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
438 unsigned flags)
439{
440 struct amdgpu_device *adev = ring->adev;
441 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
442 /* write the fence */
443 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
444 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
445 /* zero in first two bits */
446 BUG_ON(addr & 0x3);
447 amdgpu_ring_write(ring, lower_32_bits(addr));
448 amdgpu_ring_write(ring, upper_32_bits(addr));
449 amdgpu_ring_write(ring, lower_32_bits(seq));
450
451 /* optionally write high bits as well */
452 if (write64bit) {
453 addr += 4;
454 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
455 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
456 /* zero in first two bits */
457 BUG_ON(addr & 0x3);
458 amdgpu_ring_write(ring, lower_32_bits(addr));
459 amdgpu_ring_write(ring, upper_32_bits(addr));
460 amdgpu_ring_write(ring, upper_32_bits(seq));
461 }
462
463 /* Interrupts don't work correctly on the GFX10.1 model yet; use the fallback path instead */
464 if ((flags & AMDGPU_FENCE_FLAG_INT) && adev->pdev->device != 0x50) {
465 /* generate an interrupt */
466 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
467 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
468 }
469}
470
471
472/**
473 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
474 *
475 * @adev: amdgpu_device pointer
476 *
477 * Stop the gfx async dma ring buffers (NAVI10).
478 */
479static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
480{
481 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
482 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
483 u32 rb_cntl, ib_cntl;
484 int i;
485
486 if ((adev->mman.buffer_funcs_ring == sdma0) ||
487 (adev->mman.buffer_funcs_ring == sdma1))
488 amdgpu_ttm_set_buffer_funcs_status(adev, false);
489
490 for (i = 0; i < adev->sdma.num_instances; i++) {
491 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
492 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
493 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
494 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
495 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
496 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
497 }
498
499 sdma0->sched.ready = false;
500 sdma1->sched.ready = false;
501}
502
503/**
504 * sdma_v5_0_rlc_stop - stop the compute async dma engines
505 *
506 * @adev: amdgpu_device pointer
507 *
508 * Stop the compute async dma queues (NAVI10).
509 */
510static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
511{
512 /* XXX todo */
513}
514
515/**
516 * sdma_v5_0_ctx_switch_enable - enable or disable the async dma engines context switch
517 *
518 * @adev: amdgpu_device pointer
519 * @enable: enable/disable the DMA MEs context switch.
520 *
521 * Halt or unhalt the async dma engines context switch (NAVI10).
522 */
523static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
524{
525 u32 f32_cntl, phase_quantum = 0;
526 int i;
527
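 /*
  * The PHASEn_QUANTUM registers hold a VALUE field scaled by a power-of-two
  * UNIT field. Halve the requested quantum and bump the unit until the value
  * fits, clamping to the maximum (with a warning) if it still overflows.
  */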
528 if (amdgpu_sdma_phase_quantum) {
529 unsigned value = amdgpu_sdma_phase_quantum;
530 unsigned unit = 0;
531
532 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
533 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
534 value = (value + 1) >> 1;
535 unit++;
536 }
537 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
538 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
539 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
540 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
541 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
542 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
543 WARN_ONCE(1,
544 "clamping sdma_phase_quantum to %uK clock cycles\n",
545 value << unit);
546 }
547 phase_quantum =
548 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
549 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
550 }
551
552 for (i = 0; i < adev->sdma.num_instances; i++) {
553 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
554 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
555 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
556 if (enable && amdgpu_sdma_phase_quantum) {
557 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
558 phase_quantum);
559 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
560 phase_quantum);
561 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
562 phase_quantum);
563 }
564 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
565 }
566
567}
568
569/**
570 * sdma_v5_0_enable - halt or unhalt the async dma engines
571 *
572 * @adev: amdgpu_device pointer
573 * @enable: enable/disable the DMA MEs.
574 *
575 * Halt or unhalt the async dma engines (NAVI10).
576 */
577static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
578{
579 u32 f32_cntl;
580 int i;
581
582 if (!enable) {
583 sdma_v5_0_gfx_stop(adev);
584 sdma_v5_0_rlc_stop(adev);
585 }
586
587 for (i = 0; i < adev->sdma.num_instances; i++) {
588 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
589 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
590 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
591 }
592}
593
594/**
595 * sdma_v5_0_gfx_resume - setup and start the async dma engines
596 *
597 * @adev: amdgpu_device pointer
598 *
599 * Set up the gfx DMA ring buffers and enable them (NAVI10).
600 * Returns 0 for success, error for failure.
601 */
602static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
603{
604 struct amdgpu_ring *ring;
605 u32 rb_cntl, ib_cntl;
606 u32 rb_bufsz;
607 u32 wb_offset;
608 u32 doorbell;
609 u32 doorbell_offset;
610 u32 temp;
611 u32 wptr_poll_cntl;
612 u64 wptr_gpu_addr;
613 int i, r;
614
615 for (i = 0; i < adev->sdma.num_instances; i++) {
616 ring = &adev->sdma.instance[i].ring;
617 wb_offset = (ring->rptr_offs * 4);
618
619 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
620
621 /* Set ring buffer size in dwords */
622 rb_bufsz = order_base_2(ring->ring_size / 4);
623 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
624 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
625#ifdef __BIG_ENDIAN
626 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
627 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
628 RPTR_WRITEBACK_SWAP_ENABLE, 1);
629#endif
630 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
631
632 /* Initialize the ring buffer's read and write pointers */
633 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
634 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
635 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
636 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
637
638 /* setup the wptr shadow polling */
639 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
640 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
641 lower_32_bits(wptr_gpu_addr));
642 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
643 upper_32_bits(wptr_gpu_addr));
644 wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
645 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
646 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
647 SDMA0_GFX_RB_WPTR_POLL_CNTL,
648 F32_POLL_ENABLE, 1);
649 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
650 wptr_poll_cntl);
651
652 /* set the wb address whether it's enabled or not */
653 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
654 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
655 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
656 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
657
658 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
659
660 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
661 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
662
663 ring->wptr = 0;
664
665 /* before programming wptr to a smaller value, minor_ptr_update needs to be set first */
666 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
667
668 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
669 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
670 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
671 }
672
673 doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
674 doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
675
676 if (ring->use_doorbell) {
677 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
678 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
679 OFFSET, ring->doorbell_index);
680 } else {
681 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
682 }
683 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
684 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
685
686 adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
687 ring->doorbell_index, 20);
688
689 if (amdgpu_sriov_vf(adev))
690 sdma_v5_0_ring_set_wptr(ring);
691
692 /* set minor_ptr_update to 0 after wptr is programmed */
693 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
694
695 /* set utc l1 enable flag always to 1 */
696 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
697 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
698
699 /* enable MCBP */
700 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
701 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
702
703 /* Set up RESP_MODE to non-copy addresses */
704 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
705 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
706 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
707 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
708
709 /* program default cache read and write policy */
710 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
711 /* clean read policy and write policy bits */
712 temp &= 0xFF0FFF;
713 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
714 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
715
716 if (!amdgpu_sriov_vf(adev)) {
717 /* unhalt engine */
718 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
719 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
720 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
721 }
722
723 /* enable DMA RB */
724 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
725 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
726
727 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
728 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
729#ifdef __BIG_ENDIAN
730 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
731#endif
732 /* enable DMA IBs */
733 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
734
735 ring->sched.ready = true;
736
737 if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
738 sdma_v5_0_ctx_switch_enable(adev, true);
739 sdma_v5_0_enable(adev, true);
740 }
741
742 r = amdgpu_ring_test_ring(ring);
743 if (r) {
744 ring->sched.ready = false;
745 return r;
746 }
747
748 if (adev->mman.buffer_funcs_ring == ring)
749 amdgpu_ttm_set_buffer_funcs_status(adev, true);
750 }
751
752 return 0;
753}
754
755/**
756 * sdma_v5_0_rlc_resume - setup and start the async dma engines
757 *
758 * @adev: amdgpu_device pointer
759 *
760 * Set up the compute DMA queues and enable them (NAVI10).
761 * Returns 0 for success, error for failure.
762 */
763static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
764{
765 return 0;
766}
767
768/**
769 * sdma_v5_0_load_microcode - load the sDMA ME ucode
770 *
771 * @adev: amdgpu_device pointer
772 *
773 * Loads the sDMA0/1 ucode.
774 * Returns 0 for success, -EINVAL if the ucode is not available.
775 */
776static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
777{
778 const struct sdma_firmware_header_v1_0 *hdr;
779 const __le32 *fw_data;
780 u32 fw_size;
781 int i, j;
782
783 /* halt the MEs */
784 sdma_v5_0_enable(adev, false);
785
786 for (i = 0; i < adev->sdma.num_instances; i++) {
787 if (!adev->sdma.instance[i].fw)
788 return -EINVAL;
789
790 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
791 amdgpu_ucode_print_sdma_hdr(&hdr->header);
792 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
793
794 fw_data = (const __le32 *)
795 (adev->sdma.instance[i].fw->data +
796 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
797
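  /*
   * Direct (non-PSP) load: reset the ucode write address to 0, stream the
   * firmware image one dword at a time through UCODE_DATA, then leave the
   * firmware version in UCODE_ADDR.
   */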
798 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
799
800 for (j = 0; j < fw_size; j++) {
801 if (amdgpu_emu_mode == 1 && j % 500 == 0)
802 msleep(1);
803 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
804 }
805
806 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
807 }
808
809 return 0;
810}
811
812/**
813 * sdma_v5_0_start - setup and start the async dma engines
814 *
815 * @adev: amdgpu_device pointer
816 *
817 * Set up the DMA engines and enable them (NAVI10).
818 * Returns 0 for success, error for failure.
819 */
820static int sdma_v5_0_start(struct amdgpu_device *adev)
821{
822 int r = 0;
823
824 if (amdgpu_sriov_vf(adev)) {
825 sdma_v5_0_ctx_switch_enable(adev, false);
826 sdma_v5_0_enable(adev, false);
827
828 /* set RB registers */
829 r = sdma_v5_0_gfx_resume(adev);
830 return r;
831 }
832
833 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
834 r = sdma_v5_0_load_microcode(adev);
835 if (r)
836 return r;
837
838 /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
839 if (amdgpu_emu_mode == 1 && adev->pdev->device == 0x4d)
840 msleep(1000);
841 }
842
843 /* unhalt the MEs */
844 sdma_v5_0_enable(adev, true);
845 /* enable sdma ring preemption */
846 sdma_v5_0_ctx_switch_enable(adev, true);
847
848 /* start the gfx rings and rlc compute queues */
849 r = sdma_v5_0_gfx_resume(adev);
850 if (r)
851 return r;
852 r = sdma_v5_0_rlc_resume(adev);
853
854 return r;
855}
856
857/**
858 * sdma_v5_0_ring_test_ring - simple async dma engine test
859 *
860 * @ring: amdgpu_ring structure holding ring information
861 *
862 * Test the DMA engine by using it to write a value
863 * to memory (NAVI10).
864 * Returns 0 for success, error for failure.
865 */
866static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
867{
868 struct amdgpu_device *adev = ring->adev;
869 unsigned i;
870 unsigned index;
871 int r;
872 u32 tmp;
873 u64 gpu_addr;
874
875 r = amdgpu_device_wb_get(adev, &index);
876 if (r) {
877 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
878 return r;
879 }
880
881 gpu_addr = adev->wb.gpu_addr + (index * 4);
882 tmp = 0xCAFEDEAD;
883 adev->wb.wb[index] = cpu_to_le32(tmp);
884
885 r = amdgpu_ring_alloc(ring, 5);
886 if (r) {
887 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
888 amdgpu_device_wb_free(adev, index);
889 return r;
890 }
891
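 /*
  * Emit a WRITE_LINEAR packet that stores 0xDEADBEEF into the writeback slot,
  * then busy-wait on the CPU until the value shows up (or the test times out).
  */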
892 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
893 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
894 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
895 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
896 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
897 amdgpu_ring_write(ring, 0xDEADBEEF);
898 amdgpu_ring_commit(ring);
899
900 for (i = 0; i < adev->usec_timeout; i++) {
901 tmp = le32_to_cpu(adev->wb.wb[index]);
902 if (tmp == 0xDEADBEEF)
903 break;
904 if (amdgpu_emu_mode == 1)
905 msleep(1);
906 else
907 udelay(1);
908 }
909
910 if (i < adev->usec_timeout) {
911 if (amdgpu_emu_mode == 1)
912 DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
913 else
914 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
915 } else {
916 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
917 ring->idx, tmp);
918 r = -EINVAL;
919 }
920 amdgpu_device_wb_free(adev, index);
921
922 return r;
923}
924
925/**
926 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
927 *
928 * @ring: amdgpu_ring structure holding ring information
929 *
930 * Test a simple IB in the DMA ring (NAVI10).
931 * Returns 0 on success, error on failure.
932 */
933static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
934{
935 struct amdgpu_device *adev = ring->adev;
936 struct amdgpu_ib ib;
937 struct dma_fence *f = NULL;
938 unsigned index;
939 long r;
940 u32 tmp = 0;
941 u64 gpu_addr;
942
943 r = amdgpu_device_wb_get(adev, &index);
944 if (r) {
945 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
946 return r;
947 }
948
949 gpu_addr = adev->wb.gpu_addr + (index * 4);
950 tmp = 0xCAFEDEAD;
951 adev->wb.wb[index] = cpu_to_le32(tmp);
952 memset(&ib, 0, sizeof(ib));
953 r = amdgpu_ib_get(adev, NULL, 256, &ib);
954 if (r) {
955 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
956 goto err0;
957 }
958
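 /* a 5-dword WRITE_LINEAR packet, padded with NOPs to 8 dwords */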
959 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
960 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
961 ib.ptr[1] = lower_32_bits(gpu_addr);
962 ib.ptr[2] = upper_32_bits(gpu_addr);
963 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
964 ib.ptr[4] = 0xDEADBEEF;
965 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
966 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
967 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
968 ib.length_dw = 8;
969
970 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
971 if (r)
972 goto err1;
973
974 r = dma_fence_wait_timeout(f, false, timeout);
975 if (r == 0) {
976 DRM_ERROR("amdgpu: IB test timed out\n");
977 r = -ETIMEDOUT;
978 goto err1;
979 } else if (r < 0) {
980 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
981 goto err1;
982 }
983 tmp = le32_to_cpu(adev->wb.wb[index]);
984 if (tmp == 0xDEADBEEF) {
985 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
986 r = 0;
987 } else {
988 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
989 r = -EINVAL;
990 }
991
992err1:
993 amdgpu_ib_free(adev, &ib, NULL);
994 dma_fence_put(f);
995err0:
996 amdgpu_device_wb_free(adev, index);
997 return r;
998}
999
1000
1001/**
1002 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
1003 *
1004 * @ib: indirect buffer to fill with commands
1005 * @pe: addr of the page entry
1006 * @src: src addr to copy from
1007 * @count: number of page entries to update
1008 *
1009 * Update PTEs by copying them from the GART using sDMA (NAVI10).
1010 */
1011static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
1012 uint64_t pe, uint64_t src,
1013 unsigned count)
1014{
1015 unsigned bytes = count * 8;
1016
1017 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1018 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1019 ib->ptr[ib->length_dw++] = bytes - 1;
1020 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1021 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1022 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1023 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1024 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1025
1026}
1027
1028/**
1029 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
1030 *
1031 * @ib: indirect buffer to fill with commands
1032 * @pe: addr of the page entry
1033 * @value: value to write into pe
1034 * @count: number of page entries to update
1035 * @incr: increase next addr by incr bytes
1037 *
1038 * Update PTEs by writing them manually using sDMA (NAVI10).
1039 */
1040static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1041 uint64_t value, unsigned count,
1042 uint32_t incr)
1043{
1044 unsigned ndw = count * 2;
1045
1046 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1047 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1048 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1049 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1050 ib->ptr[ib->length_dw++] = ndw - 1;
1051 for (; ndw > 0; ndw -= 2) {
1052 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1053 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1054 value += incr;
1055 }
1056}
1057
1058/**
1059 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1060 *
1061 * @ib: indirect buffer to fill with commands
1062 * @pe: addr of the page entry
1063 * @addr: dst addr to write into pe
1064 * @count: number of page entries to update
1065 * @incr: increase next addr by incr bytes
1066 * @flags: access flags
1067 *
1068 * Update the page tables using sDMA (NAVI10).
1069 */
1070static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1071 uint64_t pe,
1072 uint64_t addr, unsigned count,
1073 uint32_t incr, uint64_t flags)
1074{
1075 /* for physically contiguous pages (vram) */
1076 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1077 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1078 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1079 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1080 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1081 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1082 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1083 ib->ptr[ib->length_dw++] = incr; /* increment size */
1084 ib->ptr[ib->length_dw++] = 0;
1085 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1086}
1087
1088/**
1089 * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
1090 *
1091 * @ib: indirect buffer to fill with padding
1092 *
1093 */
1094static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1095{
1096 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1097 u32 pad_count;
1098 int i;
1099
1100 pad_count = (8 - (ib->length_dw & 0x7)) % 8;
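 /* pad with NOPs to a multiple of 8 dwords; with burst NOP support the first
  * NOP's count field covers the remaining padding dwords */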
1101 for (i = 0; i < pad_count; i++)
1102 if (sdma && sdma->burst_nop && (i == 0))
1103 ib->ptr[ib->length_dw++] =
1104 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1105 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1106 else
1107 ib->ptr[ib->length_dw++] =
1108 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1109}
1110
1111
1112/**
1113 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1114 *
1115 * @ring: amdgpu_ring pointer
1116 *
1117 * Make sure all previous operations are completed (NAVI10).
1118 */
1119static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1120{
1121 uint32_t seq = ring->fence_drv.sync_seq;
1122 uint64_t addr = ring->fence_drv.gpu_addr;
1123
1124 /* wait for idle */
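 /*
  * POLL_REGMEM on the fence writeback address: stall the engine until the
  * last emitted fence sequence number is visible in memory.
  */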
1125 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1126 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1127 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1128 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1129 amdgpu_ring_write(ring, addr & 0xfffffffc);
1130 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1131 amdgpu_ring_write(ring, seq); /* reference */
1132 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1133 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1134 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1135}
1136
1137
1138/**
1139 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1140 *
1141 * @ring: amdgpu_ring pointer
1142 * @vm: amdgpu_vm pointer
1143 *
1144 * Update the page table base and flush the VM TLB
1145 * using sDMA (NAVI10).
1146 */
1147static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1148 unsigned vmid, uint64_t pd_addr)
1149{
1150 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1151}
1152
1153static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1154 uint32_t reg, uint32_t val)
1155{
1156 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1157 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1158 amdgpu_ring_write(ring, reg);
1159 amdgpu_ring_write(ring, val);
1160}
1161
1162static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1163 uint32_t val, uint32_t mask)
1164{
1165 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1166 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1167 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1168 amdgpu_ring_write(ring, reg << 2);
1169 amdgpu_ring_write(ring, 0);
1170 amdgpu_ring_write(ring, val); /* reference */
1171 amdgpu_ring_write(ring, mask); /* mask */
1172 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1173 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1174}
1175
1176static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1177 uint32_t reg0, uint32_t reg1,
1178 uint32_t ref, uint32_t mask)
1179{
1180 amdgpu_ring_emit_wreg(ring, reg0, ref);
1181 /* wait for a cycle to reset vm_inv_eng*_ack */
1182 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1183 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1184}
1185
1186static int sdma_v5_0_early_init(void *handle)
1187{
1188 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1189
1190 adev->sdma.num_instances = 2;
1191
1192 sdma_v5_0_set_ring_funcs(adev);
1193 sdma_v5_0_set_buffer_funcs(adev);
1194 sdma_v5_0_set_vm_pte_funcs(adev);
1195 sdma_v5_0_set_irq_funcs(adev);
1196
1197 return 0;
1198}
1199
1200
1201static int sdma_v5_0_sw_init(void *handle)
1202{
1203 struct amdgpu_ring *ring;
1204 int r, i;
1205 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1206
1207 /* SDMA trap event */
1208 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1209 SDMA0_5_0__SRCID__SDMA_TRAP,
1210 &adev->sdma.trap_irq);
1211 if (r)
1212 return r;
1213
1214 /* SDMA trap event */
1215 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1216 SDMA1_5_0__SRCID__SDMA_TRAP,
1217 &adev->sdma.trap_irq);
1218 if (r)
1219 return r;
1220
1221 r = sdma_v5_0_init_microcode(adev);
1222 if (r) {
1223 DRM_ERROR("Failed to load sdma firmware!\n");
1224 return r;
1225 }
1226
1227 for (i = 0; i < adev->sdma.num_instances; i++) {
1228 ring = &adev->sdma.instance[i].ring;
1229 ring->ring_obj = NULL;
1230 ring->use_doorbell = true;
1231
1232 DRM_INFO("use_doorbell being set to: [%s]\n",
1233 ring->use_doorbell?"true":"false");
1234
1235 ring->doorbell_index = (i == 0) ?
1236 (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
1237 : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
1238
1239 sprintf(ring->name, "sdma%d", i);
1240 r = amdgpu_ring_init(adev, ring, 1024,
1241 &adev->sdma.trap_irq,
1242 (i == 0) ?
1243 AMDGPU_SDMA_IRQ_INSTANCE0 :
1244 AMDGPU_SDMA_IRQ_INSTANCE1);
1245 if (r)
1246 return r;
1247 }
1248
1249 return r;
1250}
1251
1252static int sdma_v5_0_sw_fini(void *handle)
1253{
1254 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1255 int i;
1256
1257 for (i = 0; i < adev->sdma.num_instances; i++)
1258 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1259
1260 return 0;
1261}
1262
1263static int sdma_v5_0_hw_init(void *handle)
1264{
1265 int r;
1266 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1267
1268 sdma_v5_0_init_golden_registers(adev);
1269
1270 r = sdma_v5_0_start(adev);
1271
1272 return r;
1273}
1274
1275static int sdma_v5_0_hw_fini(void *handle)
1276{
1277 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1278
1279 if (amdgpu_sriov_vf(adev))
1280 return 0;
1281
1282 sdma_v5_0_ctx_switch_enable(adev, false);
1283 sdma_v5_0_enable(adev, false);
1284
1285 return 0;
1286}
1287
1288static int sdma_v5_0_suspend(void *handle)
1289{
1290 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1291
1292 return sdma_v5_0_hw_fini(adev);
1293}
1294
1295static int sdma_v5_0_resume(void *handle)
1296{
1297 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1298
1299 return sdma_v5_0_hw_init(adev);
1300}
1301
1302static bool sdma_v5_0_is_idle(void *handle)
1303{
1304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 u32 i;
1306
1307 for (i = 0; i < adev->sdma.num_instances; i++) {
1308 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1309
1310 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1311 return false;
1312 }
1313
1314 return true;
1315}
1316
1317static int sdma_v5_0_wait_for_idle(void *handle)
1318{
1319 unsigned i;
1320 u32 sdma0, sdma1;
1321 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1322
1323 for (i = 0; i < adev->usec_timeout; i++) {
1324 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1325 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1326
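  /* both SDMA instances must report idle before returning success */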
1327 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1328 return 0;
1329 udelay(1);
1330 }
1331 return -ETIMEDOUT;
1332}
1333
1334static int sdma_v5_0_soft_reset(void *handle)
1335{
1336 /* todo */
1337
1338 return 0;
1339}
1340
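/*
 * Mid-command-buffer preemption: clear the cond_exec flag so queued packets
 * get skipped, emit a trailing fence, assert SDMAx_GFX_PREEMPT, poll the
 * trailing fence to confirm the queue has drained, then deassert and re-arm
 * the cond_exec flag.
 */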
1341static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
1342{
1343 int i, r = 0;
1344 struct amdgpu_device *adev = ring->adev;
1345 u32 index = 0;
1346 u64 sdma_gfx_preempt;
1347
1348 amdgpu_sdma_get_index_from_ring(ring, &index);
1349 if (index == 0)
1350 sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
1351 else
1352 sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
1353
1354 /* assert preemption condition */
1355 amdgpu_ring_set_preempt_cond_exec(ring, false);
1356
1357 /* emit the trailing fence */
1358 ring->trail_seq += 1;
1359 amdgpu_ring_alloc(ring, 10);
1360 sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1361 ring->trail_seq, 0);
1362 amdgpu_ring_commit(ring);
1363
1364 /* assert IB preemption */
1365 WREG32(sdma_gfx_preempt, 1);
1366
1367 /* poll the trailing fence */
1368 for (i = 0; i < adev->usec_timeout; i++) {
1369 if (ring->trail_seq ==
1370 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1371 break;
1372 udelay(1);
1373 }
1374
1375 if (i >= adev->usec_timeout) {
1376 r = -EINVAL;
1377 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1378 }
1379
1380 /* deassert IB preemption */
1381 WREG32(sdma_gfx_preempt, 0);
1382
1383 /* deassert the preemption condition */
1384 amdgpu_ring_set_preempt_cond_exec(ring, true);
1385 return r;
1386}
1387
1388static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
1389 struct amdgpu_irq_src *source,
1390 unsigned type,
1391 enum amdgpu_interrupt_state state)
1392{
1393 u32 sdma_cntl;
1394
1395 u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
1396 sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
1397 sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
1398
1399 sdma_cntl = RREG32(reg_offset);
1400 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1401 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1402 WREG32(reg_offset, sdma_cntl);
1403
1404 return 0;
1405}
1406
1407static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
1408 struct amdgpu_irq_src *source,
1409 struct amdgpu_iv_entry *entry)
1410{
1411 DRM_DEBUG("IH: SDMA trap\n");
1412 switch (entry->client_id) {
1413 case SOC15_IH_CLIENTID_SDMA0:
1414 switch (entry->ring_id) {
1415 case 0:
1416 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1417 break;
1418 case 1:
1419 /* XXX compute */
1420 break;
1421 case 2:
1422 /* XXX compute */
1423 break;
1424 case 3:
1425 /* XXX page queue*/
1426 break;
1427 }
1428 break;
1429 case SOC15_IH_CLIENTID_SDMA1:
1430 switch (entry->ring_id) {
1431 case 0:
1432 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1433 break;
1434 case 1:
1435 /* XXX compute */
1436 break;
1437 case 2:
1438 /* XXX compute */
1439 break;
1440 case 3:
1441 /* XXX page queue*/
1442 break;
1443 }
1444 break;
1445 }
1446 return 0;
1447}
1448
1449static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1450 struct amdgpu_irq_src *source,
1451 struct amdgpu_iv_entry *entry)
1452{
1453 return 0;
1454}
1455
1456static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1457 bool enable)
1458{
1459 uint32_t data, def;
1460 int i;
1461
1462 for (i = 0; i < adev->sdma.num_instances; i++) {
1463 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1464 /* Enable sdma clock gating */
1465 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1466 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1467 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1468 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1469 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1470 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1471 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1472 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1473 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1474 if (def != data)
1475 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1476 } else {
1477 /* Disable sdma clock gating */
1478 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1479 data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1480 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1481 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1482 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1483 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1484 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1485 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1486 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1487 if (def != data)
1488 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1489 }
1490 }
1491}
1492
1493static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1494 bool enable)
1495{
1496 uint32_t data, def;
1497 int i;
1498
1499 for (i = 0; i < adev->sdma.num_instances; i++) {
1500 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1501 /* Enable sdma mem light sleep */
1502 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1503 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1504 if (def != data)
1505 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1506
1507 } else {
1508 /* Disable sdma mem light sleep */
1509 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1510 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1511 if (def != data)
1512 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1513
1514 }
1515 }
1516}
1517
1518static int sdma_v5_0_set_clockgating_state(void *handle,
1519 enum amd_clockgating_state state)
1520{
1521 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1522
1523 if (amdgpu_sriov_vf(adev))
1524 return 0;
1525
1526 switch (adev->asic_type) {
1527 case CHIP_NAVI10:
1528 case CHIP_NAVI14:
1529 case CHIP_NAVI12:
1530 sdma_v5_0_update_medium_grain_clock_gating(adev,
1531 state == AMD_CG_STATE_GATE ? true : false);
1532 sdma_v5_0_update_medium_grain_light_sleep(adev,
1533 state == AMD_CG_STATE_GATE ? true : false);
1534 break;
1535 default:
1536 break;
1537 }
1538
1539 return 0;
1540}
1541
1542static int sdma_v5_0_set_powergating_state(void *handle,
1543 enum amd_powergating_state state)
1544{
1545 return 0;
1546}
1547
1548static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
1549{
1550 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 int data;
1552
1553 if (amdgpu_sriov_vf(adev))
1554 *flags = 0;
1555
1556 /* AMD_CG_SUPPORT_SDMA_MGCG */
1557 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1558 if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
1559 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1560
1561 /* AMD_CG_SUPPORT_SDMA_LS */
1562 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1563 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1564 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1565}
1566
1567const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
1568 .name = "sdma_v5_0",
1569 .early_init = sdma_v5_0_early_init,
1570 .late_init = NULL,
1571 .sw_init = sdma_v5_0_sw_init,
1572 .sw_fini = sdma_v5_0_sw_fini,
1573 .hw_init = sdma_v5_0_hw_init,
1574 .hw_fini = sdma_v5_0_hw_fini,
1575 .suspend = sdma_v5_0_suspend,
1576 .resume = sdma_v5_0_resume,
1577 .is_idle = sdma_v5_0_is_idle,
1578 .wait_for_idle = sdma_v5_0_wait_for_idle,
1579 .soft_reset = sdma_v5_0_soft_reset,
1580 .set_clockgating_state = sdma_v5_0_set_clockgating_state,
1581 .set_powergating_state = sdma_v5_0_set_powergating_state,
1582 .get_clockgating_state = sdma_v5_0_get_clockgating_state,
1583};
1584
1585static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1586 .type = AMDGPU_RING_TYPE_SDMA,
1587 .align_mask = 0xf,
1588 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1589 .support_64bit_ptrs = true,
1590 .vmhub = AMDGPU_GFXHUB_0,
1591 .get_rptr = sdma_v5_0_ring_get_rptr,
1592 .get_wptr = sdma_v5_0_ring_get_wptr,
1593 .set_wptr = sdma_v5_0_ring_set_wptr,
1594 .emit_frame_size =
1595 5 + /* sdma_v5_0_ring_init_cond_exec */
1596 6 + /* sdma_v5_0_ring_emit_hdp_flush */
1597 3 + /* hdp_invalidate */
1598 6 + /* sdma_v5_0_ring_emit_pipeline_sync */
1599 /* sdma_v5_0_ring_emit_vm_flush */
1600 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1601 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
1602 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1603 .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
1604 .emit_ib = sdma_v5_0_ring_emit_ib,
1605 .emit_fence = sdma_v5_0_ring_emit_fence,
1606 .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
1607 .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
1608 .emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
1609 .test_ring = sdma_v5_0_ring_test_ring,
1610 .test_ib = sdma_v5_0_ring_test_ib,
1611 .insert_nop = sdma_v5_0_ring_insert_nop,
1612 .pad_ib = sdma_v5_0_ring_pad_ib,
1613 .emit_wreg = sdma_v5_0_ring_emit_wreg,
1614 .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
1615 .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
1616 .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
1617 .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
1618 .preempt_ib = sdma_v5_0_ring_preempt_ib,
1619};
1620
1621static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
1622{
1623 int i;
1624
1625 for (i = 0; i < adev->sdma.num_instances; i++) {
1626 adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
1627 adev->sdma.instance[i].ring.me = i;
1628 }
1629}
1630
1631static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
1632 .set = sdma_v5_0_set_trap_irq_state,
1633 .process = sdma_v5_0_process_trap_irq,
1634};
1635
1636static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
1637 .process = sdma_v5_0_process_illegal_inst_irq,
1638};
1639
1640static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
1641{
1642 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1643 adev->sdma.num_instances;
1644 adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
1645 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
1646}
1647
1648/**
1649 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
1650 *
1651 * @ring: amdgpu_ring structure holding ring information
1652 * @src_offset: src GPU address
1653 * @dst_offset: dst GPU address
1654 * @byte_count: number of bytes to xfer
1655 *
1656 * Copy GPU buffers using the DMA engine (NAVI10).
1657 * Used by the amdgpu ttm implementation to move pages if
1658 * registered as the asic copy callback.
1659 */
1660static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
1661 uint64_t src_offset,
1662 uint64_t dst_offset,
1663 uint32_t byte_count)
1664{
1665 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1666 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1667 ib->ptr[ib->length_dw++] = byte_count - 1;
1668 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1669 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1670 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1671 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1672 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1673}
1674
1675/**
1676 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
1677 *
1678 * @ring: amdgpu_ring structure holding ring information
1679 * @src_data: value to write to buffer
1680 * @dst_offset: dst GPU address
1681 * @byte_count: number of bytes to xfer
1682 *
1683 * Fill GPU buffers using the DMA engine (NAVI10).
1684 */
1685static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
1686 uint32_t src_data,
1687 uint64_t dst_offset,
1688 uint32_t byte_count)
1689{
1690 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1691 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1692 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1693 ib->ptr[ib->length_dw++] = src_data;
1694 ib->ptr[ib->length_dw++] = byte_count - 1;
1695}
1696
1697static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
1698 .copy_max_bytes = 0x400000,
1699 .copy_num_dw = 7,
1700 .emit_copy_buffer = sdma_v5_0_emit_copy_buffer,
1701
1702 .fill_max_bytes = 0x400000,
1703 .fill_num_dw = 5,
1704 .emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
1705};
1706
1707static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
1708{
1709 if (adev->mman.buffer_funcs == NULL) {
1710 adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
1711 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1712 }
1713}
1714
1715static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
1716 .copy_pte_num_dw = 7,
1717 .copy_pte = sdma_v5_0_vm_copy_pte,
1718 .write_pte = sdma_v5_0_vm_write_pte,
1719 .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
1720};
1721
1722static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1723{
1724 struct drm_gpu_scheduler *sched;
1725 unsigned i;
1726
1727 if (adev->vm_manager.vm_pte_funcs == NULL) {
1728 adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
1729 for (i = 0; i < adev->sdma.num_instances; i++) {
1730 sched = &adev->sdma.instance[i].ring.sched;
1731 adev->vm_manager.vm_pte_rqs[i] =
1732 &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1733 }
1734 adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
1735 }
1736}
1737
1738const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
1739 .type = AMD_IP_BLOCK_TYPE_SDMA,
1740 .major = 5,
1741 .minor = 0,
1742 .rev = 0,
1743 .funcs = &sdma_v5_0_ip_funcs,
1744};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_1_0_offset.h"
34#include "gc/gc_10_1_0_sh_mask.h"
35#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37
38#include "soc15_common.h"
39#include "soc15.h"
40#include "navi10_sdma_pkt_open.h"
41#include "nbio_v2_3.h"
42#include "sdma_common.h"
43#include "sdma_v5_0.h"
44
45MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
46MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
47
48MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
49MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
50
51MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
52MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
53
54MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
55MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
56
57#define SDMA1_REG_OFFSET 0x600
58#define SDMA0_HYP_DEC_REG_START 0x5880
59#define SDMA0_HYP_DEC_REG_END 0x5893
60#define SDMA1_HYP_DEC_REG_OFFSET 0x20
61
62static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
63static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
64static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
65static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
66
67static const struct soc15_reg_golden golden_settings_sdma_5[] = {
68 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
69 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
70 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
71 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
72 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
73 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
74 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
75 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
76 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
77 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
78 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
80 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
81 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
83 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
84 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
85 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
86 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
87 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
88 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
89 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
90 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
91 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
92};
93
94static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
95 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
96 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
97 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
98 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
99 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
100 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
101 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
102 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
103 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
104 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
105 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
106 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
107 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
108 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
109 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
110 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
111 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
112 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
115};
116
117static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
120};
121
122static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
123 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
124 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
125};
126
127static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
128 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
129 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
130 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
131 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
132 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
134};
135
136static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = {
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
142 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
143 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
144 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
145 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
146 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
147 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
148 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
149 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
150 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00),
151 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
152 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
153 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
154 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
155 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
156 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
157 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
158 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
159 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
160 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
161 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
162 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
163 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
164 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00)
165};
166
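/*
 * Translate an SDMA register offset into an absolute GC register offset.
 * Registers in the SDMA0_HYP_DEC range live in the second GC aperture and
 * use a small per-instance stride, while all other registers use the first
 * aperture and the regular SDMA1_REG_OFFSET stride for instance 1.
 */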
167static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
168{
169 u32 base;
170
171 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
172 internal_offset <= SDMA0_HYP_DEC_REG_END) {
173 base = adev->reg_offset[GC_HWIP][0][1];
174 if (instance == 1)
175 internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
176 } else {
177 base = adev->reg_offset[GC_HWIP][0][0];
178 if (instance == 1)
179 internal_offset += SDMA1_REG_OFFSET;
180 }
181
182 return base + internal_offset;
183}
184
185static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
186{
187 switch (adev->ip_versions[SDMA0_HWIP][0]) {
188 case IP_VERSION(5, 0, 0):
189 soc15_program_register_sequence(adev,
190 golden_settings_sdma_5,
191 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
192 soc15_program_register_sequence(adev,
193 golden_settings_sdma_nv10,
194 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
195 break;
196 case IP_VERSION(5, 0, 2):
197 soc15_program_register_sequence(adev,
198 golden_settings_sdma_5,
199 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
200 soc15_program_register_sequence(adev,
201 golden_settings_sdma_nv14,
202 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
203 break;
204 case IP_VERSION(5, 0, 5):
205 if (amdgpu_sriov_vf(adev))
206 soc15_program_register_sequence(adev,
207 golden_settings_sdma_5_sriov,
208 (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
209 else
210 soc15_program_register_sequence(adev,
211 golden_settings_sdma_5,
212 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
213 soc15_program_register_sequence(adev,
214 golden_settings_sdma_nv12,
215 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
216 break;
217 case IP_VERSION(5, 0, 1):
218 soc15_program_register_sequence(adev,
219 golden_settings_sdma_cyan_skillfish,
220 (const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
221 break;
222 default:
223 break;
224 }
225}
226
227/**
228 * sdma_v5_0_init_microcode - load ucode images from disk
229 *
230 * @adev: amdgpu_device pointer
231 *
232 * Use the firmware interface to load the ucode images into
233 * the driver (not loaded into hw).
234 * Returns 0 on success, error on failure.
235 */
236
// emulation only, won't work on a real chip
// a real Navi10 chip needs to use the PSP to load firmware
239static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
240{
241 const char *chip_name;
242 char fw_name[40];
243 int ret, i;
244
245 if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
246 return 0;
247
248 DRM_DEBUG("\n");
249
250 switch (adev->ip_versions[SDMA0_HWIP][0]) {
251 case IP_VERSION(5, 0, 0):
252 chip_name = "navi10";
253 break;
254 case IP_VERSION(5, 0, 2):
255 chip_name = "navi14";
256 break;
257 case IP_VERSION(5, 0, 5):
258 chip_name = "navi12";
259 break;
260 case IP_VERSION(5, 0, 1):
261 chip_name = "cyan_skillfish2";
262 break;
263 default:
264 BUG();
265 }
266
267 for (i = 0; i < adev->sdma.num_instances; i++) {
268 if (i == 0)
269 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
270 else
271 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
272 ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
273 if (ret)
274 return ret;
275 }
276
277 return ret;
278}
279
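/*
 * Emit a COND_EXE packet that checks the 32-bit value at
 * cond_exe_gpu_addr.  The final dword (the execution count) is written as
 * a dummy value here and patched later by sdma_v5_0_ring_patch_cond_exec()
 * once the size of the conditionally executed section is known.
 */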
280static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
281{
282 unsigned ret;
283
284 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
285 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
286 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
287 amdgpu_ring_write(ring, 1);
288 ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
289 amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
290
291 return ret;
292}
293
294static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
295 unsigned offset)
296{
297 unsigned cur;
298
299 BUG_ON(offset > ring->buf_mask);
300 BUG_ON(ring->ring[offset] != 0x55aa55aa);
301
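	/* patch the placeholder with the number of dwords emitted since the
	 * COND_EXE packet, taking a ring buffer wrap-around into account
	 */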
302 cur = (ring->wptr - 1) & ring->buf_mask;
303 if (cur > offset)
304 ring->ring[offset] = cur - offset;
305 else
306 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
307}
308
309/**
310 * sdma_v5_0_ring_get_rptr - get the current read pointer
311 *
312 * @ring: amdgpu ring pointer
313 *
314 * Get the current rptr from the hardware (NAVI10+).
315 */
316static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
317{
318 u64 *rptr;
319
320 /* XXX check if swapping is necessary on BE */
321 rptr = (u64 *)ring->rptr_cpu_addr;
322
323 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
324 return ((*rptr) >> 2);
325}
326
327/**
328 * sdma_v5_0_ring_get_wptr - get the current write pointer
329 *
330 * @ring: amdgpu ring pointer
331 *
332 * Get the current wptr from the hardware (NAVI10+).
333 */
334static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
335{
336 struct amdgpu_device *adev = ring->adev;
337 u64 wptr;
338
339 if (ring->use_doorbell) {
340 /* XXX check if swapping is necessary on BE */
341 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
342 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
343 } else {
344 wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
345 wptr = wptr << 32;
346 wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
347 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
348 }
349
350 return wptr >> 2;
351}
352
353/**
354 * sdma_v5_0_ring_set_wptr - commit the write pointer
355 *
356 * @ring: amdgpu ring pointer
357 *
358 * Write the wptr back to the hardware (NAVI10+).
359 */
360static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
361{
362 struct amdgpu_device *adev = ring->adev;
363 uint32_t *wptr_saved;
364 uint32_t *is_queue_unmap;
365 uint64_t aggregated_db_index;
366 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
367
368 DRM_DEBUG("Setting write pointer\n");
369 if (ring->is_mes_queue) {
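		/* for MES queues the wptr shadow and the "queue unmapped"
		 * flag are stored right after the MQD in the MQD buffer;
		 * when the queue has been unmapped, the aggregated doorbell
		 * is rung in addition to the ring's own doorbell
		 */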
370 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
371 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
372 sizeof(uint32_t));
373 aggregated_db_index =
374 amdgpu_mes_get_aggregated_doorbell_index(adev,
375 AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
376
377 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
378 ring->wptr << 2);
379 *wptr_saved = ring->wptr << 2;
380 if (*is_queue_unmap) {
381 WDOORBELL64(aggregated_db_index, ring->wptr << 2);
382 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
383 ring->doorbell_index, ring->wptr << 2);
384 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
385 } else {
386 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
387 ring->doorbell_index, ring->wptr << 2);
388 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
389
390 if (*is_queue_unmap)
391 WDOORBELL64(aggregated_db_index,
392 ring->wptr << 2);
393 }
394 } else {
395 if (ring->use_doorbell) {
396 DRM_DEBUG("Using doorbell -- "
397 "wptr_offs == 0x%08x "
398 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
399 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
400 ring->wptr_offs,
401 lower_32_bits(ring->wptr << 2),
402 upper_32_bits(ring->wptr << 2));
403 /* XXX check if swapping is necessary on BE */
404 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
405 ring->wptr << 2);
406 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
407 ring->doorbell_index, ring->wptr << 2);
408 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
409 } else {
410 DRM_DEBUG("Not using doorbell -- "
411 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
412 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
413 ring->me,
414 lower_32_bits(ring->wptr << 2),
415 ring->me,
416 upper_32_bits(ring->wptr << 2));
417 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
418 ring->me, mmSDMA0_GFX_RB_WPTR),
419 lower_32_bits(ring->wptr << 2));
420 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
421 ring->me, mmSDMA0_GFX_RB_WPTR_HI),
422 upper_32_bits(ring->wptr << 2));
423 }
424 }
425}
426
427static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
428{
429 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
430 int i;
431
432 for (i = 0; i < count; i++)
433 if (sdma && sdma->burst_nop && (i == 0))
434 amdgpu_ring_write(ring, ring->funcs->nop |
435 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
436 else
437 amdgpu_ring_write(ring, ring->funcs->nop);
438}
439
440/**
441 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
442 *
443 * @ring: amdgpu ring pointer
444 * @job: job to retrieve vmid from
445 * @ib: IB object to schedule
446 * @flags: unused
447 *
448 * Schedule an IB in the DMA ring (NAVI10).
449 */
450static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
451 struct amdgpu_job *job,
452 struct amdgpu_ib *ib,
453 uint32_t flags)
454{
455 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
456 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
457
	/* An IB packet must end on an 8-dword boundary: the next packet
	 * must start on an 8-dword boundary. Our IB packet below is 6
	 * dwords long, so add x NOPs such that, in modular arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is
	 * (wptr + 6 + x) % 8 = 0.
	 * The expression below is a solution for x.
	 */
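	/* e.g. with wptr % 8 == 5: x = (2 - 5) & 7 = 5 NOPs, and
	 * 5 + 5 + 6 = 16 is again a multiple of 8
	 */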
466 sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
467
468 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
469 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
470 /* base must be 32 byte aligned */
471 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
472 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
473 amdgpu_ring_write(ring, ib->length_dw);
474 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
475 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
476}
477
478/**
479 * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
480 *
481 * @ring: amdgpu ring pointer
482 *
483 * flush the IB by graphics cache rinse.
484 */
485static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
486{
487 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
488 SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
489 SDMA_GCR_GLI_INV(1);
490
	/* flush the entire L0/L1/L2 cache; this can be tuned based on performance requirements */
492 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
493 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
494 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
495 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
496 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
497 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
498 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
499 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
500}
501
502/**
503 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
504 *
505 * @ring: amdgpu ring pointer
506 *
507 * Emit an hdp flush packet on the requested DMA ring.
508 */
509static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
510{
511 struct amdgpu_device *adev = ring->adev;
512 u32 ref_and_mask = 0;
513 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
514
515 if (ring->me == 0)
516 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
517 else
518 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
519
520 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
521 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
522 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
523 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
524 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
525 amdgpu_ring_write(ring, ref_and_mask); /* reference */
526 amdgpu_ring_write(ring, ref_and_mask); /* mask */
527 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
528 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
529}
530
531/**
532 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
533 *
534 * @ring: amdgpu ring pointer
 * @addr: address where the fence sequence number is written
536 * @seq: sequence number
537 * @flags: fence related flags
538 *
539 * Add a DMA fence packet to the ring to write
540 * the fence seq number and DMA trap packet to generate
541 * an interrupt if needed (NAVI10).
542 */
543static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
544 unsigned flags)
545{
546 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
547 /* write the fence */
548 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
549 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
550 /* zero in first two bits */
551 BUG_ON(addr & 0x3);
552 amdgpu_ring_write(ring, lower_32_bits(addr));
553 amdgpu_ring_write(ring, upper_32_bits(addr));
554 amdgpu_ring_write(ring, lower_32_bits(seq));
555
556 /* optionally write high bits as well */
557 if (write64bit) {
558 addr += 4;
559 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
560 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
561 /* zero in first two bits */
562 BUG_ON(addr & 0x3);
563 amdgpu_ring_write(ring, lower_32_bits(addr));
564 amdgpu_ring_write(ring, upper_32_bits(addr));
565 amdgpu_ring_write(ring, upper_32_bits(seq));
566 }
567
568 if (flags & AMDGPU_FENCE_FLAG_INT) {
569 uint32_t ctx = ring->is_mes_queue ?
570 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
571 /* generate an interrupt */
572 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
573 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
574 }
575}
576
577
578/**
579 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
580 *
581 * @adev: amdgpu_device pointer
582 *
583 * Stop the gfx async dma ring buffers (NAVI10).
584 */
585static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
586{
587 u32 rb_cntl, ib_cntl;
588 int i;
589
590 amdgpu_sdma_unset_buffer_funcs_helper(adev);
591
592 for (i = 0; i < adev->sdma.num_instances; i++) {
593 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
594 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
595 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
596 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
597 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
598 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
599 }
600}
601
602/**
603 * sdma_v5_0_rlc_stop - stop the compute async dma engines
604 *
605 * @adev: amdgpu_device pointer
606 *
607 * Stop the compute async dma queues (NAVI10).
608 */
609static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
610{
611 /* XXX todo */
612}
613
614/**
615 * sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch
616 *
617 * @adev: amdgpu_device pointer
618 * @enable: enable/disable the DMA MEs context switch.
619 *
620 * Halt or unhalt the async dma engines context switch (NAVI10).
621 */
622static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
623{
624 u32 f32_cntl = 0, phase_quantum = 0;
625 int i;
626
627 if (amdgpu_sdma_phase_quantum) {
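		/* encode the requested quantum as value * 2^unit clock
		 * cycles: halve the value (rounding up) and bump the unit
		 * until the value fits the PHASE_QUANTUM VALUE field,
		 * clamping to the largest encodable quantum if necessary
		 */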
628 unsigned value = amdgpu_sdma_phase_quantum;
629 unsigned unit = 0;
630
631 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
632 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
633 value = (value + 1) >> 1;
634 unit++;
635 }
636 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
637 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
638 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
639 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
640 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
641 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
642 WARN_ONCE(1,
643 "clamping sdma_phase_quantum to %uK clock cycles\n",
644 value << unit);
645 }
646 phase_quantum =
647 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
648 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
649 }
650
651 for (i = 0; i < adev->sdma.num_instances; i++) {
652 if (!amdgpu_sriov_vf(adev)) {
653 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
654 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
655 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
656 }
657
658 if (enable && amdgpu_sdma_phase_quantum) {
659 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
660 phase_quantum);
661 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
662 phase_quantum);
663 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
664 phase_quantum);
665 }
666 if (!amdgpu_sriov_vf(adev))
667 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
668 }
669
670}
671
672/**
673 * sdma_v5_0_enable - stop the async dma engines
674 *
675 * @adev: amdgpu_device pointer
676 * @enable: enable/disable the DMA MEs.
677 *
678 * Halt or unhalt the async dma engines (NAVI10).
679 */
680static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
681{
682 u32 f32_cntl;
683 int i;
684
685 if (!enable) {
686 sdma_v5_0_gfx_stop(adev);
687 sdma_v5_0_rlc_stop(adev);
688 }
689
690 if (amdgpu_sriov_vf(adev))
691 return;
692
693 for (i = 0; i < adev->sdma.num_instances; i++) {
694 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
695 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
696 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
697 }
698}
699
700/**
701 * sdma_v5_0_gfx_resume - setup and start the async dma engines
702 *
703 * @adev: amdgpu_device pointer
704 *
705 * Set up the gfx DMA ring buffers and enable them (NAVI10).
706 * Returns 0 for success, error for failure.
707 */
708static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
709{
710 struct amdgpu_ring *ring;
711 u32 rb_cntl, ib_cntl;
712 u32 rb_bufsz;
713 u32 doorbell;
714 u32 doorbell_offset;
715 u32 temp;
716 u32 wptr_poll_cntl;
717 u64 wptr_gpu_addr;
718 int i, r;
719
720 for (i = 0; i < adev->sdma.num_instances; i++) {
721 ring = &adev->sdma.instance[i].ring;
722
723 if (!amdgpu_sriov_vf(adev))
724 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
725
726 /* Set ring buffer size in dwords */
727 rb_bufsz = order_base_2(ring->ring_size / 4);
728 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
729 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
730#ifdef __BIG_ENDIAN
731 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
732 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
733 RPTR_WRITEBACK_SWAP_ENABLE, 1);
734#endif
735 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
736
737 /* Initialize the ring buffer's read and write pointers */
738 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
739 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
740 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
741 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
742
743 /* setup the wptr shadow polling */
744 wptr_gpu_addr = ring->wptr_gpu_addr;
745 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
746 lower_32_bits(wptr_gpu_addr));
747 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
748 upper_32_bits(wptr_gpu_addr));
749 wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
750 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
751 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
752 SDMA0_GFX_RB_WPTR_POLL_CNTL,
753 F32_POLL_ENABLE, 1);
754 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
755 wptr_poll_cntl);
756
757 /* set the wb address whether it's enabled or not */
758 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
759 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
760 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
761 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
762
763 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
764
765 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
766 ring->gpu_addr >> 8);
767 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
768 ring->gpu_addr >> 40);
769
770 ring->wptr = 0;
771
		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
773 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
774
775 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
776 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
777 lower_32_bits(ring->wptr << 2));
778 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
779 upper_32_bits(ring->wptr << 2));
780 }
781
782 doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
783 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
784 mmSDMA0_GFX_DOORBELL_OFFSET));
785
786 if (ring->use_doorbell) {
787 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
788 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
789 OFFSET, ring->doorbell_index);
790 } else {
791 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
792 }
793 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
794 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
795 doorbell_offset);
796
797 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
798 ring->doorbell_index, 20);
799
800 if (amdgpu_sriov_vf(adev))
801 sdma_v5_0_ring_set_wptr(ring);
802
		/* set minor_ptr_update to 0 after wptr is programmed */
804 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
805
806 if (!amdgpu_sriov_vf(adev)) {
807 /* set utc l1 enable flag always to 1 */
808 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
809 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
810
811 /* enable MCBP */
812 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
813 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
814
815 /* Set up RESP_MODE to non-copy addresses */
816 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
817 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
818 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
819 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
820
821 /* program default cache read and write policy */
822 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
823 /* clean read policy and write policy bits */
824 temp &= 0xFF0FFF;
825 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
826 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
827 }
828
829 if (!amdgpu_sriov_vf(adev)) {
830 /* unhalt engine */
831 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
832 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
833 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
834 }
835
836 /* enable DMA RB */
837 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
838 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
839
840 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
841 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
842#ifdef __BIG_ENDIAN
843 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
844#endif
845 /* enable DMA IBs */
846 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
847
848 ring->sched.ready = true;
849
		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
851 sdma_v5_0_ctx_switch_enable(adev, true);
852 sdma_v5_0_enable(adev, true);
853 }
854
855 r = amdgpu_ring_test_helper(ring);
856 if (r)
857 return r;
858
859 if (adev->mman.buffer_funcs_ring == ring)
860 amdgpu_ttm_set_buffer_funcs_status(adev, true);
861 }
862
863 return 0;
864}
865
866/**
867 * sdma_v5_0_rlc_resume - setup and start the async dma engines
868 *
869 * @adev: amdgpu_device pointer
870 *
871 * Set up the compute DMA queues and enable them (NAVI10).
872 * Returns 0 for success, error for failure.
873 */
874static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
875{
876 return 0;
877}
878
879/**
880 * sdma_v5_0_load_microcode - load the sDMA ME ucode
881 *
882 * @adev: amdgpu_device pointer
883 *
884 * Loads the sDMA0/1 ucode.
885 * Returns 0 for success, -EINVAL if the ucode is not available.
886 */
887static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
888{
889 const struct sdma_firmware_header_v1_0 *hdr;
890 const __le32 *fw_data;
891 u32 fw_size;
892 int i, j;
893
894 /* halt the MEs */
895 sdma_v5_0_enable(adev, false);
896
897 for (i = 0; i < adev->sdma.num_instances; i++) {
898 if (!adev->sdma.instance[i].fw)
899 return -EINVAL;
900
901 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
902 amdgpu_ucode_print_sdma_hdr(&hdr->header);
903 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
904
905 fw_data = (const __le32 *)
906 (adev->sdma.instance[i].fw->data +
907 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
908
909 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
910
911 for (j = 0; j < fw_size; j++) {
912 if (amdgpu_emu_mode == 1 && j % 500 == 0)
913 msleep(1);
914 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
915 }
916
917 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
918 }
919
920 return 0;
921}
922
923/**
924 * sdma_v5_0_start - setup and start the async dma engines
925 *
926 * @adev: amdgpu_device pointer
927 *
928 * Set up the DMA engines and enable them (NAVI10).
929 * Returns 0 for success, error for failure.
930 */
931static int sdma_v5_0_start(struct amdgpu_device *adev)
932{
933 int r = 0;
934
935 if (amdgpu_sriov_vf(adev)) {
936 sdma_v5_0_ctx_switch_enable(adev, false);
937 sdma_v5_0_enable(adev, false);
938
939 /* set RB registers */
940 r = sdma_v5_0_gfx_resume(adev);
941 return r;
942 }
943
944 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
945 r = sdma_v5_0_load_microcode(adev);
946 if (r)
947 return r;
948 }
949
950 /* unhalt the MEs */
951 sdma_v5_0_enable(adev, true);
952 /* enable sdma ring preemption */
953 sdma_v5_0_ctx_switch_enable(adev, true);
954
955 /* start the gfx rings and rlc compute queues */
956 r = sdma_v5_0_gfx_resume(adev);
957 if (r)
958 return r;
959 r = sdma_v5_0_rlc_resume(adev);
960
961 return r;
962}
963
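/*
 * Initialize a v10 SDMA MQD from the generic queue properties: ring buffer
 * size and base, wptr-poll and rptr-writeback addresses, and the doorbell
 * offset.  The wptr-poll and IB control values are seeded from the
 * instance-0 GFX registers.
 */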
964static int sdma_v5_0_mqd_init(struct amdgpu_device *adev, void *mqd,
965 struct amdgpu_mqd_prop *prop)
966{
967 struct v10_sdma_mqd *m = mqd;
968 uint64_t wb_gpu_addr;
969
970 m->sdmax_rlcx_rb_cntl =
971 order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
972 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
973 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
974 1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
975
976 m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
977 m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
978
979 m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
980 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
981
982 wb_gpu_addr = prop->wptr_gpu_addr;
983 m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
984 m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
985
986 wb_gpu_addr = prop->rptr_gpu_addr;
987 m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
988 m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
989
990 m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
991 mmSDMA0_GFX_IB_CNTL));
992
993 m->sdmax_rlcx_doorbell_offset =
994 prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
995
996 m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
997
998 return 0;
999}
1000
1001static void sdma_v5_0_set_mqd_funcs(struct amdgpu_device *adev)
1002{
1003 adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
1004 adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_0_mqd_init;
1005}
1006
1007/**
1008 * sdma_v5_0_ring_test_ring - simple async dma engine test
1009 *
1010 * @ring: amdgpu_ring structure holding ring information
1011 *
 * Test the DMA engine by using it to write a value to memory (NAVI10).
1014 * Returns 0 for success, error for failure.
1015 */
1016static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
1017{
1018 struct amdgpu_device *adev = ring->adev;
1019 unsigned i;
1020 unsigned index;
1021 int r;
1022 u32 tmp;
1023 u64 gpu_addr;
1024 volatile uint32_t *cpu_ptr = NULL;
1025
1026 tmp = 0xCAFEDEAD;
1027
1028 if (ring->is_mes_queue) {
1029 uint32_t offset = 0;
1030 offset = amdgpu_mes_ctx_get_offs(ring,
1031 AMDGPU_MES_CTX_PADDING_OFFS);
1032 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1033 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
1034 *cpu_ptr = tmp;
1035 } else {
1036 r = amdgpu_device_wb_get(adev, &index);
1037 if (r) {
1038 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
1039 return r;
1040 }
1041
1042 gpu_addr = adev->wb.gpu_addr + (index * 4);
1043 adev->wb.wb[index] = cpu_to_le32(tmp);
1044 }
1045
1046 r = amdgpu_ring_alloc(ring, 20);
1047 if (r) {
1048 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
1049 amdgpu_device_wb_free(adev, index);
1050 return r;
1051 }
1052
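	/* emit a one-dword linear write of 0xDEADBEEF to the test slot */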
1053 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1054 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1055 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1056 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1057 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1058 amdgpu_ring_write(ring, 0xDEADBEEF);
1059 amdgpu_ring_commit(ring);
1060
1061 for (i = 0; i < adev->usec_timeout; i++) {
1062 if (ring->is_mes_queue)
1063 tmp = le32_to_cpu(*cpu_ptr);
1064 else
1065 tmp = le32_to_cpu(adev->wb.wb[index]);
1066 if (tmp == 0xDEADBEEF)
1067 break;
1068 if (amdgpu_emu_mode == 1)
1069 msleep(1);
1070 else
1071 udelay(1);
1072 }
1073
1074 if (i >= adev->usec_timeout)
1075 r = -ETIMEDOUT;
1076
1077 if (!ring->is_mes_queue)
1078 amdgpu_device_wb_free(adev, index);
1079
1080 return r;
1081}
1082
1083/**
1084 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
1085 *
1086 * @ring: amdgpu_ring structure holding ring information
1087 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1088 *
1089 * Test a simple IB in the DMA ring (NAVI10).
1090 * Returns 0 on success, error on failure.
1091 */
1092static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1093{
1094 struct amdgpu_device *adev = ring->adev;
1095 struct amdgpu_ib ib;
1096 struct dma_fence *f = NULL;
1097 unsigned index;
1098 long r;
1099 u32 tmp = 0;
1100 u64 gpu_addr;
1101 volatile uint32_t *cpu_ptr = NULL;
1102
1103 tmp = 0xCAFEDEAD;
1104 memset(&ib, 0, sizeof(ib));
1105
1106 if (ring->is_mes_queue) {
1107 uint32_t offset = 0;
1108 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
1109 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1110 ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
1111
1112 offset = amdgpu_mes_ctx_get_offs(ring,
1113 AMDGPU_MES_CTX_PADDING_OFFS);
1114 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1115 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
1116 *cpu_ptr = tmp;
1117 } else {
1118 r = amdgpu_device_wb_get(adev, &index);
1119 if (r) {
1120 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
1121 return r;
1122 }
1123
1124 gpu_addr = adev->wb.gpu_addr + (index * 4);
1125 adev->wb.wb[index] = cpu_to_le32(tmp);
1126
1127 r = amdgpu_ib_get(adev, NULL, 256,
1128 AMDGPU_IB_POOL_DIRECT, &ib);
1129 if (r) {
1130 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
1131 goto err0;
1132 }
1133 }
1134
1135 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1136 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1137 ib.ptr[1] = lower_32_bits(gpu_addr);
1138 ib.ptr[2] = upper_32_bits(gpu_addr);
1139 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1140 ib.ptr[4] = 0xDEADBEEF;
1141 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1142 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1143 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1144 ib.length_dw = 8;
1145
1146 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1147 if (r)
1148 goto err1;
1149
1150 r = dma_fence_wait_timeout(f, false, timeout);
1151 if (r == 0) {
1152 DRM_ERROR("amdgpu: IB test timed out\n");
1153 r = -ETIMEDOUT;
1154 goto err1;
1155 } else if (r < 0) {
1156 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
1157 goto err1;
1158 }
1159
1160 if (ring->is_mes_queue)
1161 tmp = le32_to_cpu(*cpu_ptr);
1162 else
1163 tmp = le32_to_cpu(adev->wb.wb[index]);
1164
1165 if (tmp == 0xDEADBEEF)
1166 r = 0;
1167 else
1168 r = -EINVAL;
1169
1170err1:
1171 amdgpu_ib_free(adev, &ib, NULL);
1172 dma_fence_put(f);
1173err0:
1174 if (!ring->is_mes_queue)
1175 amdgpu_device_wb_free(adev, index);
1176 return r;
1177}
1178
1179
1180/**
1181 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
1182 *
1183 * @ib: indirect buffer to fill with commands
1184 * @pe: addr of the page entry
1185 * @src: src addr to copy from
1186 * @count: number of page entries to update
1187 *
1188 * Update PTEs by copying them from the GART using sDMA (NAVI10).
1189 */
1190static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
1191 uint64_t pe, uint64_t src,
1192 unsigned count)
1193{
1194 unsigned bytes = count * 8;
1195
1196 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1197 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1198 ib->ptr[ib->length_dw++] = bytes - 1;
1199 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1200 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1201 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1202 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1203 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1204
1205}
1206
1207/**
1208 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
1209 *
1210 * @ib: indirect buffer to fill with commands
1211 * @pe: addr of the page entry
 * @value: value (address + flags) to write into the page table entries
1213 * @count: number of page entries to update
1214 * @incr: increase next addr by incr bytes
1215 *
1216 * Update PTEs by writing them manually using sDMA (NAVI10).
1217 */
1218static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1219 uint64_t value, unsigned count,
1220 uint32_t incr)
1221{
1222 unsigned ndw = count * 2;
1223
1224 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1225 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1226 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1227 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1228 ib->ptr[ib->length_dw++] = ndw - 1;
1229 for (; ndw > 0; ndw -= 2) {
1230 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1231 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1232 value += incr;
1233 }
1234}
1235
1236/**
1237 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1238 *
1239 * @ib: indirect buffer to fill with commands
1240 * @pe: addr of the page entry
1241 * @addr: dst addr to write into pe
1242 * @count: number of page entries to update
1243 * @incr: increase next addr by incr bytes
1244 * @flags: access flags
1245 *
1246 * Update the page tables using sDMA (NAVI10).
1247 */
1248static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1249 uint64_t pe,
1250 uint64_t addr, unsigned count,
1251 uint32_t incr, uint64_t flags)
1252{
1253 /* for physically contiguous pages (vram) */
1254 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1255 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1256 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1257 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1258 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1259 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1260 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1261 ib->ptr[ib->length_dw++] = incr; /* increment size */
1262 ib->ptr[ib->length_dw++] = 0;
1263 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1264}
1265
1266/**
1267 * sdma_v5_0_ring_pad_ib - pad the IB
1268 * @ring: amdgpu_ring structure holding ring information
1269 * @ib: indirect buffer to fill with padding
1270 *
1271 * Pad the IB with NOPs to a boundary multiple of 8.
1272 */
1273static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1274{
1275 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1276 u32 pad_count;
1277 int i;
1278
1279 pad_count = (-ib->length_dw) & 0x7;
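	/* e.g. length_dw == 13 -> pad_count == 3, padding the IB to 16 dwords */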
1280 for (i = 0; i < pad_count; i++)
1281 if (sdma && sdma->burst_nop && (i == 0))
1282 ib->ptr[ib->length_dw++] =
1283 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1284 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1285 else
1286 ib->ptr[ib->length_dw++] =
1287 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1288}
1289
1290
1291/**
1292 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1293 *
1294 * @ring: amdgpu_ring pointer
1295 *
 * Make sure all previous operations are completed (NAVI10).
1297 */
1298static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1299{
1300 uint32_t seq = ring->fence_drv.sync_seq;
1301 uint64_t addr = ring->fence_drv.gpu_addr;
1302
1303 /* wait for idle */
1304 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1305 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1306 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1307 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1308 amdgpu_ring_write(ring, addr & 0xfffffffc);
1309 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1310 amdgpu_ring_write(ring, seq); /* reference */
1311 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1312 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1313 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1314}
1315
1316
1317/**
1318 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1319 *
1320 * @ring: amdgpu_ring pointer
1321 * @vmid: vmid number to use
 * @pd_addr: page directory base address
1323 *
1324 * Update the page table base and flush the VM TLB
1325 * using sDMA (NAVI10).
1326 */
1327static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1328 unsigned vmid, uint64_t pd_addr)
1329{
1330 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1331}
1332
1333static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1334 uint32_t reg, uint32_t val)
1335{
1336 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1337 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1338 amdgpu_ring_write(ring, reg);
1339 amdgpu_ring_write(ring, val);
1340}
1341
1342static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1343 uint32_t val, uint32_t mask)
1344{
1345 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1346 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1347 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1348 amdgpu_ring_write(ring, reg << 2);
1349 amdgpu_ring_write(ring, 0);
1350 amdgpu_ring_write(ring, val); /* reference */
1351 amdgpu_ring_write(ring, mask); /* mask */
1352 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1353 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1354}
1355
1356static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1357 uint32_t reg0, uint32_t reg1,
1358 uint32_t ref, uint32_t mask)
1359{
1360 amdgpu_ring_emit_wreg(ring, reg0, ref);
1361 /* wait for a cycle to reset vm_inv_eng*_ack */
1362 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1363 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1364}
1365
1366static int sdma_v5_0_early_init(void *handle)
1367{
1368 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1369
1370 sdma_v5_0_set_ring_funcs(adev);
1371 sdma_v5_0_set_buffer_funcs(adev);
1372 sdma_v5_0_set_vm_pte_funcs(adev);
1373 sdma_v5_0_set_irq_funcs(adev);
1374 sdma_v5_0_set_mqd_funcs(adev);
1375
1376 return 0;
1377}
1378
1379
1380static int sdma_v5_0_sw_init(void *handle)
1381{
1382 struct amdgpu_ring *ring;
1383 int r, i;
1384 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1385
1386 /* SDMA trap event */
1387 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1388 SDMA0_5_0__SRCID__SDMA_TRAP,
1389 &adev->sdma.trap_irq);
1390 if (r)
1391 return r;
1392
1393 /* SDMA trap event */
1394 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1395 SDMA1_5_0__SRCID__SDMA_TRAP,
1396 &adev->sdma.trap_irq);
1397 if (r)
1398 return r;
1399
1400 r = sdma_v5_0_init_microcode(adev);
1401 if (r) {
1402 DRM_ERROR("Failed to load sdma firmware!\n");
1403 return r;
1404 }
1405
1406 for (i = 0; i < adev->sdma.num_instances; i++) {
1407 ring = &adev->sdma.instance[i].ring;
1408 ring->ring_obj = NULL;
1409 ring->use_doorbell = true;
1410
1411 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1412 ring->use_doorbell?"true":"false");
1413
1414 ring->doorbell_index = (i == 0) ?
1415 (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
1416 : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
1417
1418 sprintf(ring->name, "sdma%d", i);
1419 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1420 (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
1421 AMDGPU_SDMA_IRQ_INSTANCE1,
1422 AMDGPU_RING_PRIO_DEFAULT, NULL);
1423 if (r)
1424 return r;
1425 }
1426
1427 return r;
1428}
1429
1430static int sdma_v5_0_sw_fini(void *handle)
1431{
1432 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1433 int i;
1434
1435 for (i = 0; i < adev->sdma.num_instances; i++)
1436 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1437
1438 amdgpu_sdma_destroy_inst_ctx(adev, false);
1439
1440 return 0;
1441}
1442
1443static int sdma_v5_0_hw_init(void *handle)
1444{
1445 int r;
1446 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1447
1448 sdma_v5_0_init_golden_registers(adev);
1449
1450 r = sdma_v5_0_start(adev);
1451
1452 return r;
1453}
1454
1455static int sdma_v5_0_hw_fini(void *handle)
1456{
1457 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1458
1459 if (amdgpu_sriov_vf(adev)) {
1460 /* disable the scheduler for SDMA */
1461 amdgpu_sdma_unset_buffer_funcs_helper(adev);
1462 return 0;
1463 }
1464
1465 sdma_v5_0_ctx_switch_enable(adev, false);
1466 sdma_v5_0_enable(adev, false);
1467
1468 return 0;
1469}
1470
1471static int sdma_v5_0_suspend(void *handle)
1472{
1473 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1474
1475 return sdma_v5_0_hw_fini(adev);
1476}
1477
1478static int sdma_v5_0_resume(void *handle)
1479{
1480 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1481
1482 return sdma_v5_0_hw_init(adev);
1483}
1484
1485static bool sdma_v5_0_is_idle(void *handle)
1486{
1487 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488 u32 i;
1489
1490 for (i = 0; i < adev->sdma.num_instances; i++) {
1491 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1492
1493 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1494 return false;
1495 }
1496
1497 return true;
1498}
1499
1500static int sdma_v5_0_wait_for_idle(void *handle)
1501{
1502 unsigned i;
1503 u32 sdma0, sdma1;
1504 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1505
1506 for (i = 0; i < adev->usec_timeout; i++) {
1507 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1508 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1509
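		/* both instances must report idle before declaring success */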
1510 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1511 return 0;
1512 udelay(1);
1513 }
1514 return -ETIMEDOUT;
1515}
1516
1517static int sdma_v5_0_soft_reset(void *handle)
1518{
1519 /* todo */
1520
1521 return 0;
1522}
1523
static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	if (index == 0)
		sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
	else
		sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

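/**
 * sdma_v5_0_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu device pointer
 * @source: irq source
 * @type: SDMA instance the interrupt belongs to
 * @state: requested interrupt state
 *
 * Toggle TRAP_ENABLE in SDMA0_CNTL for the selected instance
 * (the register is not touched under SR-IOV).
 */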
static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	if (!amdgpu_sriov_vf(adev)) {
		u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
			sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
			sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);

		sdma_cntl = RREG32(reg_offset);
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
					  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		WREG32(reg_offset, sdma_cntl);
	}

	return 0;
}

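/**
 * sdma_v5_0_process_trap_irq - handle an SDMA trap interrupt
 *
 * @adev: amdgpu device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Route the trap either to the MES queue that raised it or to fence
 * processing on the ring of the matching SDMA instance.
 */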
static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: SDMA trap\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
		return 0;
	}

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

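/*
 * Enable or disable medium grain clock gating (MGCG) by clearing or
 * setting the SOFT_OVERRIDE bits in SDMA0_CLK_CTRL on every SDMA
 * instance; the register is only written when the value changes.
 */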
static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

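/*
 * Enable or disable SDMA memory light sleep by setting or clearing the
 * MEM_POWER_OVERRIDE bit in SDMA0_POWER_CNTL on every SDMA instance.
 */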
static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		}
	}
}

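/**
 * sdma_v5_0_set_clockgating_state - set the SDMA clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to program
 *
 * Apply both MGCG and memory light sleep according to the requested
 * state on SDMA 5.0.x parts; a no-op under SR-IOV.
 */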
static int sdma_v5_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		sdma_v5_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

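/**
 * sdma_v5_0_get_clockgating_state - report the current SDMA clockgating state
 *
 * @handle: amdgpu_device pointer
 * @flags: clockgating feature flags to fill in
 *
 * Read back SDMA0_CLK_CTRL and SDMA0_POWER_CNTL on instance 0 to
 * report whether MGCG and memory light sleep are currently active.
 */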
static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
	.name = "sdma_v5_0",
	.early_init = sdma_v5_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v5_0_sw_init,
	.sw_fini = sdma_v5_0_sw_fini,
	.hw_init = sdma_v5_0_hw_init,
	.hw_fini = sdma_v5_0_hw_fini,
	.suspend = sdma_v5_0_suspend,
	.resume = sdma_v5_0_resume,
	.is_idle = sdma_v5_0_is_idle,
	.wait_for_idle = sdma_v5_0_wait_for_idle,
	.soft_reset = sdma_v5_0_soft_reset,
	.set_clockgating_state = sdma_v5_0_set_clockgating_state,
	.set_powergating_state = sdma_v5_0_set_powergating_state,
	.get_clockgating_state = sdma_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = sdma_v5_0_ring_get_rptr,
	.get_wptr = sdma_v5_0_ring_get_wptr,
	.set_wptr = sdma_v5_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_0_ring_init_cond_exec */
		6 + /* sdma_v5_0_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
		/* sdma_v5_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
	.emit_ib = sdma_v5_0_ring_emit_ib,
	.emit_mem_sync = sdma_v5_0_ring_emit_mem_sync,
	.emit_fence = sdma_v5_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
	.test_ring = sdma_v5_0_ring_test_ring,
	.test_ib = sdma_v5_0_ring_test_ib,
	.insert_nop = sdma_v5_0_ring_insert_nop,
	.pad_ib = sdma_v5_0_ring_pad_ib,
	.emit_wreg = sdma_v5_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
	.preempt_ib = sdma_v5_0_ring_preempt_ib,
};

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
	.set = sdma_v5_0_set_trap_irq_state,
	.process = sdma_v5_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
	.process = sdma_v5_0_process_illegal_inst_irq,
};

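/*
 * Hook up the trap and illegal instruction interrupt sources and size
 * the trap source to the number of SDMA instances present.
 */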
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: if a secure copy should be used
 *
 * Copy GPU buffers using the DMA engine (NAVI10).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (NAVI10).
 */
static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
};

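/*
 * Register SDMA instance 0 as the TTM buffer copy/fill engine unless
 * another engine has already claimed it.
 */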
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_0_vm_copy_pte,
	.write_pte = sdma_v5_0_vm_write_pte,
	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
};

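/*
 * Register the SDMA rings as the schedulers used for GPU VM page table
 * updates unless a different engine has already been set up.
 */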
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v5_0_ip_funcs,
};