1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/delay.h>
26#include <linux/firmware.h>
27#include <linux/module.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32#include "vi.h"
33#include "vid.h"
34
35#include "oss/oss_3_0_d.h"
36#include "oss/oss_3_0_sh_mask.h"
37
38#include "gmc/gmc_8_1_d.h"
39#include "gmc/gmc_8_1_sh_mask.h"
40
41#include "gca/gfx_8_0_d.h"
42#include "gca/gfx_8_0_enum.h"
43#include "gca/gfx_8_0_sh_mask.h"
44
45#include "bif/bif_5_0_d.h"
46#include "bif/bif_5_0_sh_mask.h"
47
48#include "tonga_sdma_pkt_open.h"
49
50#include "ivsrcid/ivsrcid_vislands30.h"
51
52static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
53static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
54static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
55static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
56
57MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
58MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
59MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
60MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
61MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
62MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
63MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
64MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
65MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
66MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
67MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
68MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
69MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
70MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
71MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");
72
73
74static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
75{
76 SDMA0_REGISTER_OFFSET,
77 SDMA1_REGISTER_OFFSET
78};
79
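/* The golden register tables below are { register, mask, value } triplets,
 * applied per ASIC by amdgpu_device_program_register_sequence() from
 * sdma_v3_0_init_golden_registers().
 */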
80static const u32 golden_settings_tonga_a11[] =
81{
82 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
83 mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
84 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
85 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
86 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
87 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
88 mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
89 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
90 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
91 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
92};
93
94static const u32 tonga_mgcg_cgcg_init[] =
95{
96 mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
97 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
98};
99
100static const u32 golden_settings_fiji_a10[] =
101{
102 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
103 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
104 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
105 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
106 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
107 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
108 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
109 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
110};
111
112static const u32 fiji_mgcg_cgcg_init[] =
113{
114 mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
115 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
116};
117
118static const u32 golden_settings_polaris11_a11[] =
119{
120 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
121 mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
122 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
123 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
124 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
125 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
126 mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
127 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
128 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
129 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
130};
131
132static const u32 golden_settings_polaris10_a11[] =
133{
134 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
135 mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
136 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
137 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
138 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
139 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
140 mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
141 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
142 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
143 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
144};
145
146static const u32 cz_golden_settings_a11[] =
147{
148 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
149 mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
150 mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
151 mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
152 mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
153 mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
154 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
155 mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
156 mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
157 mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
158 mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
159 mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
160};
161
162static const u32 cz_mgcg_cgcg_init[] =
163{
164 mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
165 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
166};
167
168static const u32 stoney_golden_settings_a11[] =
169{
170 mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
171 mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
172 mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
173 mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
174};
175
176static const u32 stoney_mgcg_cgcg_init[] =
177{
178 mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
179};
180
181/*
182 * sDMA - System DMA
183 * Starting with CIK, the GPU has new asynchronous
184 * DMA engines. These engines are used for compute
185 * and gfx. There are two DMA engines (SDMA0, SDMA1)
186 * and each one supports 1 ring buffer used for gfx
187 * and 2 queues used for compute.
188 *
189 * The programming model is very similar to the CP
190 * (ring buffer, IBs, etc.), but sDMA has its own
191 * packet format that is different from the PM4 format
192 * used by the CP. sDMA supports copying data, writing
193 * embedded data, solid fills, and a number of other
194 * things. It also has support for tiling/detiling of
195 * buffers.
196 */
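/*
 * Illustrative sketch (not an actual code path in this file) of how a
 * packet reaches the engine with this ring model, using the generic
 * amdgpu ring helpers that the emit functions below build on:
 *
 *	if (!amdgpu_ring_alloc(ring, 1)) {	   reserve one dword
 *		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
 *		amdgpu_ring_commit(ring);	   publish the new wptr
 *	}
 */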
197
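/**
 * sdma_v3_0_init_golden_registers - program the golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Apply the per-ASIC clock gating and golden register tables above
 * before the SDMA engines are brought up (VI).
 */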
198static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
199{
200 switch (adev->asic_type) {
201 case CHIP_FIJI:
202 amdgpu_device_program_register_sequence(adev,
203 fiji_mgcg_cgcg_init,
204 ARRAY_SIZE(fiji_mgcg_cgcg_init));
205 amdgpu_device_program_register_sequence(adev,
206 golden_settings_fiji_a10,
207 ARRAY_SIZE(golden_settings_fiji_a10));
208 break;
209 case CHIP_TONGA:
210 amdgpu_device_program_register_sequence(adev,
211 tonga_mgcg_cgcg_init,
212 ARRAY_SIZE(tonga_mgcg_cgcg_init));
213 amdgpu_device_program_register_sequence(adev,
214 golden_settings_tonga_a11,
215 ARRAY_SIZE(golden_settings_tonga_a11));
216 break;
217 case CHIP_POLARIS11:
218 case CHIP_POLARIS12:
219 case CHIP_VEGAM:
220 amdgpu_device_program_register_sequence(adev,
221 golden_settings_polaris11_a11,
222 ARRAY_SIZE(golden_settings_polaris11_a11));
223 break;
224 case CHIP_POLARIS10:
225 amdgpu_device_program_register_sequence(adev,
226 golden_settings_polaris10_a11,
227 ARRAY_SIZE(golden_settings_polaris10_a11));
228 break;
229 case CHIP_CARRIZO:
230 amdgpu_device_program_register_sequence(adev,
231 cz_mgcg_cgcg_init,
232 ARRAY_SIZE(cz_mgcg_cgcg_init));
233 amdgpu_device_program_register_sequence(adev,
234 cz_golden_settings_a11,
235 ARRAY_SIZE(cz_golden_settings_a11));
236 break;
237 case CHIP_STONEY:
238 amdgpu_device_program_register_sequence(adev,
239 stoney_mgcg_cgcg_init,
240 ARRAY_SIZE(stoney_mgcg_cgcg_init));
241 amdgpu_device_program_register_sequence(adev,
242 stoney_golden_settings_a11,
243 ARRAY_SIZE(stoney_golden_settings_a11));
244 break;
245 default:
246 break;
247 }
248}
249
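/**
 * sdma_v3_0_free_microcode - release the SDMA firmware images
 *
 * @adev: amdgpu_device pointer
 *
 * Drop the firmware references taken by sdma_v3_0_init_microcode().
 */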
250static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
251{
252 int i;
253
254 for (i = 0; i < adev->sdma.num_instances; i++)
255 amdgpu_ucode_release(&adev->sdma.instance[i].fw);
256}
257
258/**
259 * sdma_v3_0_init_microcode - load ucode images from disk
260 *
261 * @adev: amdgpu_device pointer
262 *
263 * Use the firmware interface to load the ucode images into
264 * the driver (not loaded into hw).
265 * Returns 0 on success, error on failure.
266 */
267static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
268{
269 const char *chip_name;
270 char fw_name[30];
271 int err = 0, i;
272 struct amdgpu_firmware_info *info = NULL;
273 const struct common_firmware_header *header = NULL;
274 const struct sdma_firmware_header_v1_0 *hdr;
275
276 DRM_DEBUG("\n");
277
278 switch (adev->asic_type) {
279 case CHIP_TONGA:
280 chip_name = "tonga";
281 break;
282 case CHIP_FIJI:
283 chip_name = "fiji";
284 break;
285 case CHIP_POLARIS10:
286 chip_name = "polaris10";
287 break;
288 case CHIP_POLARIS11:
289 chip_name = "polaris11";
290 break;
291 case CHIP_POLARIS12:
292 chip_name = "polaris12";
293 break;
294 case CHIP_VEGAM:
295 chip_name = "vegam";
296 break;
297 case CHIP_CARRIZO:
298 chip_name = "carrizo";
299 break;
300 case CHIP_STONEY:
301 chip_name = "stoney";
302 break;
303 default: BUG();
304 }
305
306 for (i = 0; i < adev->sdma.num_instances; i++) {
307 if (i == 0)
308 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
309 else
310 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
311 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
312 if (err)
313 goto out;
314 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
315 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
316 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
317 if (adev->sdma.instance[i].feature_version >= 20)
318 adev->sdma.instance[i].burst_nop = true;
319
320 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
321 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
322 info->fw = adev->sdma.instance[i].fw;
323 header = (const struct common_firmware_header *)info->fw->data;
324 adev->firmware.fw_size +=
325 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
326
327 }
328out:
329 if (err) {
330 pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
331 for (i = 0; i < adev->sdma.num_instances; i++)
332 amdgpu_ucode_release(&adev->sdma.instance[i].fw);
333 }
334 return err;
335}
336
337/**
338 * sdma_v3_0_ring_get_rptr - get the current read pointer
339 *
340 * @ring: amdgpu ring pointer
341 *
342 * Get the current rptr from the hardware (VI+).
343 */
344static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
345{
346 /* XXX check if swapping is necessary on BE */
347 return *ring->rptr_cpu_addr >> 2;
348}
349
350/**
351 * sdma_v3_0_ring_get_wptr - get the current write pointer
352 *
353 * @ring: amdgpu ring pointer
354 *
355 * Get the current wptr from the hardware (VI+).
356 */
357static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
358{
359 struct amdgpu_device *adev = ring->adev;
360 u32 wptr;
361
362 if (ring->use_doorbell || ring->use_pollmem) {
363 /* XXX check if swapping is necessary on BE */
364 wptr = *ring->wptr_cpu_addr >> 2;
365 } else {
366 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
367 }
368
369 return wptr;
370}
371
372/**
373 * sdma_v3_0_ring_set_wptr - commit the write pointer
374 *
375 * @ring: amdgpu ring pointer
376 *
377 * Write the wptr back to the hardware (VI+).
378 */
379static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
380{
381 struct amdgpu_device *adev = ring->adev;
382
383 if (ring->use_doorbell) {
384 u32 *wb = (u32 *)ring->wptr_cpu_addr;
385 /* XXX check if swapping is necessary on BE */
386 WRITE_ONCE(*wb, ring->wptr << 2);
387 WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
388 } else if (ring->use_pollmem) {
389 u32 *wb = (u32 *)ring->wptr_cpu_addr;
390
391 WRITE_ONCE(*wb, ring->wptr << 2);
392 } else {
393 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
394 }
395}
396
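/**
 * sdma_v3_0_ring_insert_nop - insert NOP packets on the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the ring with NOPs; when the firmware supports burst NOPs
 * (feature_version >= 20), a single header covers the whole run (VI).
 */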
397static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
398{
399 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
400 int i;
401
402 for (i = 0; i < count; i++)
403 if (sdma && sdma->burst_nop && (i == 0))
404 amdgpu_ring_write(ring, ring->funcs->nop |
405 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
406 else
407 amdgpu_ring_write(ring, ring->funcs->nop);
408}
409
410/**
411 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
412 *
413 * @ring: amdgpu ring pointer
414 * @job: job to retrieve vmid from
415 * @ib: IB object to schedule
416 * @flags: unused
417 *
418 * Schedule an IB in the DMA ring (VI).
419 */
420static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
421 struct amdgpu_job *job,
422 struct amdgpu_ib *ib,
423 uint32_t flags)
424{
425 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
426
427 /* The IB packet is 6 DWs and must end on an 8 DW boundary, so pad with NOPs until (wptr & 7) == 2 */
428 sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
429
430 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
431 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
432 /* base must be 32 byte aligned */
433 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
434 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
435 amdgpu_ring_write(ring, ib->length_dw);
436 amdgpu_ring_write(ring, 0);
437 amdgpu_ring_write(ring, 0);
438
439}
440
441/**
442 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
443 *
444 * @ring: amdgpu ring pointer
445 *
446 * Emit an hdp flush packet on the requested DMA ring.
447 */
448static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
449{
450 u32 ref_and_mask = 0;
451
452 if (ring->me == 0)
453 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
454 else
455 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
456
457 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
458 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
459 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
460 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
461 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
462 amdgpu_ring_write(ring, ref_and_mask); /* reference */
463 amdgpu_ring_write(ring, ref_and_mask); /* mask */
464 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
465 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
466}
467
468/**
469 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
470 *
471 * @ring: amdgpu ring pointer
472 * @addr: GPU address where the fence value is written
473 * @seq: sequence number
474 * @flags: fence related flags
475 *
476 * Add a DMA fence packet to the ring to write the fence seq
477 * number, followed by a DMA trap packet to generate an
478 * interrupt if needed (VI).
479 */
480static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
481 unsigned flags)
482{
483 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
484 /* write the fence */
485 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
486 amdgpu_ring_write(ring, lower_32_bits(addr));
487 amdgpu_ring_write(ring, upper_32_bits(addr));
488 amdgpu_ring_write(ring, lower_32_bits(seq));
489
490 /* optionally write high bits as well */
491 if (write64bit) {
492 addr += 4;
493 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
494 amdgpu_ring_write(ring, lower_32_bits(addr));
495 amdgpu_ring_write(ring, upper_32_bits(addr));
496 amdgpu_ring_write(ring, upper_32_bits(seq));
497 }
498
499 /* generate an interrupt */
500 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
501 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
502}
503
504/**
505 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
506 *
507 * @adev: amdgpu_device pointer
508 *
509 * Stop the gfx async dma ring buffers (VI).
510 */
511static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
512{
513 u32 rb_cntl, ib_cntl;
514 int i;
515
516 for (i = 0; i < adev->sdma.num_instances; i++) {
517 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
518 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
519 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
520 ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
521 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
522 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
523 }
524}
525
526/**
527 * sdma_v3_0_rlc_stop - stop the compute async dma engines
528 *
529 * @adev: amdgpu_device pointer
530 *
531 * Stop the compute async dma queues (VI).
532 */
533static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
534{
535 /* XXX todo */
536}
537
538/**
539 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
540 *
541 * @adev: amdgpu_device pointer
542 * @enable: enable/disable the DMA MEs context switch.
543 *
544 * Halt or unhalt the async dma engines context switch (VI).
545 */
546static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
547{
548 u32 f32_cntl, phase_quantum = 0;
549 int i;
550
551 if (amdgpu_sdma_phase_quantum) {
552 unsigned value = amdgpu_sdma_phase_quantum;
553 unsigned unit = 0;
554
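		/* Encode the quantum as value << unit so it fits the
		 * PHASE0_QUANTUM VALUE/UNIT register fields, clamping
		 * to the field maximum if the requested value is too large.
		 */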
555 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
556 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
557 value = (value + 1) >> 1;
558 unit++;
559 }
560 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
561 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
562 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
563 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
564 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
565 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
566 WARN_ONCE(1,
567 "clamping sdma_phase_quantum to %uK clock cycles\n",
568 value << unit);
569 }
570 phase_quantum =
571 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
572 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
573 }
574
575 for (i = 0; i < adev->sdma.num_instances; i++) {
576 f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
577 if (enable) {
578 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
579 AUTO_CTXSW_ENABLE, 1);
580 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
581 ATC_L1_ENABLE, 1);
582 if (amdgpu_sdma_phase_quantum) {
583 WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
584 phase_quantum);
585 WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
586 phase_quantum);
587 }
588 } else {
589 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
590 AUTO_CTXSW_ENABLE, 0);
591 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
592 ATC_L1_ENABLE, 1);
593 }
594
595 WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
596 }
597}
598
599/**
600 * sdma_v3_0_enable - halt or unhalt the async dma engines
601 *
602 * @adev: amdgpu_device pointer
603 * @enable: enable/disable the DMA MEs.
604 *
605 * Halt or unhalt the async dma engines (VI).
606 */
607static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
608{
609 u32 f32_cntl;
610 int i;
611
612 if (!enable) {
613 sdma_v3_0_gfx_stop(adev);
614 sdma_v3_0_rlc_stop(adev);
615 }
616
617 for (i = 0; i < adev->sdma.num_instances; i++) {
618 f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
619 if (enable)
620 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
621 else
622 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
623 WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
624 }
625}
626
627/**
628 * sdma_v3_0_gfx_resume - setup and start the async dma engines
629 *
630 * @adev: amdgpu_device pointer
631 *
632 * Set up the gfx DMA ring buffers and enable them (VI).
633 * Returns 0 for success, error for failure.
634 */
635static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
636{
637 struct amdgpu_ring *ring;
638 u32 rb_cntl, ib_cntl, wptr_poll_cntl;
639 u32 rb_bufsz;
640 u32 doorbell;
641 u64 wptr_gpu_addr;
642 int i, j, r;
643
644 for (i = 0; i < adev->sdma.num_instances; i++) {
645 ring = &adev->sdma.instance[i].ring;
646 amdgpu_ring_clear_ring(ring);
647
648 mutex_lock(&adev->srbm_mutex);
649 for (j = 0; j < 16; j++) {
650 vi_srbm_select(adev, 0, 0, 0, j);
651 /* SDMA GFX */
652 WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
653 WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
654 }
655 vi_srbm_select(adev, 0, 0, 0, 0);
656 mutex_unlock(&adev->srbm_mutex);
657
658 WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
659 adev->gfx.config.gb_addr_config & 0x70);
660
661 WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
662
663 /* Set ring buffer size in dwords */
664 rb_bufsz = order_base_2(ring->ring_size / 4);
665 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
666 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
667#ifdef __BIG_ENDIAN
668 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
669 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
670 RPTR_WRITEBACK_SWAP_ENABLE, 1);
671#endif
672 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
673
674 /* Initialize the ring buffer's read and write pointers */
675 ring->wptr = 0;
676 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
677 sdma_v3_0_ring_set_wptr(ring);
678 WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
679 WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
680
681 /* set the wb address whether it's enabled or not */
682 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
683 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
684 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
685 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
686
687 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
688
689 WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
690 WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
691
692 doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);
693
694 if (ring->use_doorbell) {
695 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
696 OFFSET, ring->doorbell_index);
697 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
698 } else {
699 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
700 }
701 WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
702
703 /* setup the wptr shadow polling */
704 wptr_gpu_addr = ring->wptr_gpu_addr;
705
706 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
707 lower_32_bits(wptr_gpu_addr));
708 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
709 upper_32_bits(wptr_gpu_addr));
710 wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
711 if (ring->use_pollmem) {
712 /* wptr polling is not fast enough, directly clear the wptr register */
713 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
714 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
715 SDMA0_GFX_RB_WPTR_POLL_CNTL,
716 ENABLE, 1);
717 } else {
718 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
719 SDMA0_GFX_RB_WPTR_POLL_CNTL,
720 ENABLE, 0);
721 }
722 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
723
724 /* enable DMA RB */
725 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
726 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
727
728 ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
729 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
730#ifdef __BIG_ENDIAN
731 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
732#endif
733 /* enable DMA IBs */
734 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
735 }
736
737 /* unhalt the MEs */
738 sdma_v3_0_enable(adev, true);
739 /* enable sdma ring preemption */
740 sdma_v3_0_ctx_switch_enable(adev, true);
741
742 for (i = 0; i < adev->sdma.num_instances; i++) {
743 ring = &adev->sdma.instance[i].ring;
744 r = amdgpu_ring_test_helper(ring);
745 if (r)
746 return r;
747 }
748
749 return 0;
750}
751
752/**
753 * sdma_v3_0_rlc_resume - setup and start the async dma engines
754 *
755 * @adev: amdgpu_device pointer
756 *
757 * Set up the compute DMA queues and enable them (VI).
758 * Returns 0 for success, error for failure.
759 */
760static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
761{
762 /* XXX todo */
763 return 0;
764}
765
766/**
767 * sdma_v3_0_start - setup and start the async dma engines
768 *
769 * @adev: amdgpu_device pointer
770 *
771 * Set up the DMA engines and enable them (VI).
772 * Returns 0 for success, error for failure.
773 */
774static int sdma_v3_0_start(struct amdgpu_device *adev)
775{
776 int r;
777
778 /* disable sdma engine before programming it */
779 sdma_v3_0_ctx_switch_enable(adev, false);
780 sdma_v3_0_enable(adev, false);
781
782 /* start the gfx rings and rlc compute queues */
783 r = sdma_v3_0_gfx_resume(adev);
784 if (r)
785 return r;
786 r = sdma_v3_0_rlc_resume(adev);
787 if (r)
788 return r;
789
790 return 0;
791}
792
793/**
794 * sdma_v3_0_ring_test_ring - simple async dma engine test
795 *
796 * @ring: amdgpu_ring structure holding ring information
797 *
798 * Test the DMA engine by using it to write a known
799 * value to memory (VI).
800 * Returns 0 for success, error for failure.
801 */
802static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
803{
804 struct amdgpu_device *adev = ring->adev;
805 unsigned i;
806 unsigned index;
807 int r;
808 u32 tmp;
809 u64 gpu_addr;
810
811 r = amdgpu_device_wb_get(adev, &index);
812 if (r)
813 return r;
814
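	/* Seed a writeback slot with a sentinel, have SDMA overwrite it
	 * with 0xDEADBEEF, then poll until the new value shows up.
	 */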
815 gpu_addr = adev->wb.gpu_addr + (index * 4);
816 tmp = 0xCAFEDEAD;
817 adev->wb.wb[index] = cpu_to_le32(tmp);
818
819 r = amdgpu_ring_alloc(ring, 5);
820 if (r)
821 goto error_free_wb;
822
823 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
824 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
825 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
826 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
827 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
828 amdgpu_ring_write(ring, 0xDEADBEEF);
829 amdgpu_ring_commit(ring);
830
831 for (i = 0; i < adev->usec_timeout; i++) {
832 tmp = le32_to_cpu(adev->wb.wb[index]);
833 if (tmp == 0xDEADBEEF)
834 break;
835 udelay(1);
836 }
837
838 if (i >= adev->usec_timeout)
839 r = -ETIMEDOUT;
840
841error_free_wb:
842 amdgpu_device_wb_free(adev, index);
843 return r;
844}
845
846/**
847 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
848 *
849 * @ring: amdgpu_ring structure holding ring information
850 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
851 *
852 * Test a simple IB in the DMA ring (VI).
853 * Returns 0 on success, error on failure.
854 */
855static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
856{
857 struct amdgpu_device *adev = ring->adev;
858 struct amdgpu_ib ib;
859 struct dma_fence *f = NULL;
860 unsigned index;
861 u32 tmp = 0;
862 u64 gpu_addr;
863 long r;
864
865 r = amdgpu_device_wb_get(adev, &index);
866 if (r)
867 return r;
868
869 gpu_addr = adev->wb.gpu_addr + (index * 4);
870 tmp = 0xCAFEDEAD;
871 adev->wb.wb[index] = cpu_to_le32(tmp);
872 memset(&ib, 0, sizeof(ib));
873 r = amdgpu_ib_get(adev, NULL, 256,
874 AMDGPU_IB_POOL_DIRECT, &ib);
875 if (r)
876 goto err0;
877
878 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
879 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
880 ib.ptr[1] = lower_32_bits(gpu_addr);
881 ib.ptr[2] = upper_32_bits(gpu_addr);
882 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
883 ib.ptr[4] = 0xDEADBEEF;
884 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
885 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
886 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
887 ib.length_dw = 8;
888
889 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
890 if (r)
891 goto err1;
892
893 r = dma_fence_wait_timeout(f, false, timeout);
894 if (r == 0) {
895 r = -ETIMEDOUT;
896 goto err1;
897 } else if (r < 0) {
898 goto err1;
899 }
900 tmp = le32_to_cpu(adev->wb.wb[index]);
901 if (tmp == 0xDEADBEEF)
902 r = 0;
903 else
904 r = -EINVAL;
905err1:
906 amdgpu_ib_free(adev, &ib, NULL);
907 dma_fence_put(f);
908err0:
909 amdgpu_device_wb_free(adev, index);
910 return r;
911}
912
913/**
914 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
915 *
916 * @ib: indirect buffer to fill with commands
917 * @pe: addr of the page entry
918 * @src: src addr to copy from
919 * @count: number of page entries to update
920 *
921 * Update PTEs by copying them from the GART using sDMA (VI).
922 */
923static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
924 uint64_t pe, uint64_t src,
925 unsigned count)
926{
927 unsigned bytes = count * 8;
928
929 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
930 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
931 ib->ptr[ib->length_dw++] = bytes;
932 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
933 ib->ptr[ib->length_dw++] = lower_32_bits(src);
934 ib->ptr[ib->length_dw++] = upper_32_bits(src);
935 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
936 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
937}
938
939/**
940 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
941 *
942 * @ib: indirect buffer to fill with commands
943 * @pe: addr of the page entry
944 * @value: dst addr to write into pe
945 * @count: number of page entries to update
946 * @incr: increase next addr by incr bytes
947 *
948 * Update PTEs by writing them manually using sDMA (VI).
949 */
950static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
951 uint64_t value, unsigned count,
952 uint32_t incr)
953{
954 unsigned ndw = count * 2;
955
956 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
957 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
958 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
959 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
960 ib->ptr[ib->length_dw++] = ndw;
961 for (; ndw > 0; ndw -= 2) {
962 ib->ptr[ib->length_dw++] = lower_32_bits(value);
963 ib->ptr[ib->length_dw++] = upper_32_bits(value);
964 value += incr;
965 }
966}
967
968/**
969 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
970 *
971 * @ib: indirect buffer to fill with commands
972 * @pe: addr of the page entry
973 * @addr: dst addr to write into pe
974 * @count: number of page entries to update
975 * @incr: increase next addr by incr bytes
976 * @flags: access flags
977 *
978 * Update the page tables using sDMA (VI).
979 */
980static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
981 uint64_t addr, unsigned count,
982 uint32_t incr, uint64_t flags)
983{
984 /* for physically contiguous pages (vram) */
985 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
986 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
987 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
988 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
989 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
990 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
991 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
992 ib->ptr[ib->length_dw++] = incr; /* increment size */
993 ib->ptr[ib->length_dw++] = 0;
994 ib->ptr[ib->length_dw++] = count; /* number of entries */
995}
996
997/**
998 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
999 *
1000 * @ring: amdgpu_ring structure holding ring information
1001 * @ib: indirect buffer to fill with padding
1002 *
 * Pad the IB with NOPs to a multiple of 8 DWs (VI).
1003 */
1004static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1005{
1006 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1007 u32 pad_count;
1008 int i;
1009
1010 pad_count = (-ib->length_dw) & 7;
1011 for (i = 0; i < pad_count; i++)
1012 if (sdma && sdma->burst_nop && (i == 0))
1013 ib->ptr[ib->length_dw++] =
1014 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1015 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1016 else
1017 ib->ptr[ib->length_dw++] =
1018 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1019}
1020
1021/**
1022 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
1023 *
1024 * @ring: amdgpu_ring pointer
1025 *
1026 * Make sure all previous operations are completed (VI).
1027 */
1028static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1029{
1030 uint32_t seq = ring->fence_drv.sync_seq;
1031 uint64_t addr = ring->fence_drv.gpu_addr;
1032
1033 /* wait for idle */
1034 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1035 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1036 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1037 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1038 amdgpu_ring_write(ring, addr & 0xfffffffc);
1039 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1040 amdgpu_ring_write(ring, seq); /* reference */
1041 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1042 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1043 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1044}
1045
1046/**
1047 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
1048 *
1049 * @ring: amdgpu_ring pointer
1050 * @vmid: vmid number to use
1051 * @pd_addr: page directory base address
1052 *
1053 * Update the page table base and flush the VM TLB
1054 * using sDMA (VI).
1055 */
1056static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1057 unsigned vmid, uint64_t pd_addr)
1058{
1059 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1060
1061 /* wait for flush */
1062 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1063 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1064 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
1065 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
1066 amdgpu_ring_write(ring, 0);
1067 amdgpu_ring_write(ring, 0); /* reference */
1068 amdgpu_ring_write(ring, 0); /* mask */
1069 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1070 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
1071}
1072
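/**
 * sdma_v3_0_ring_emit_wreg - emit a register write packet on the ring
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to program
 * @val: value to write
 *
 * Emit an SRBM write packet so the register is programmed by the
 * engine as part of the command stream (VI).
 */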
1073static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
1074 uint32_t reg, uint32_t val)
1075{
1076 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1077 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1078 amdgpu_ring_write(ring, reg);
1079 amdgpu_ring_write(ring, val);
1080}
1081
1082static int sdma_v3_0_early_init(void *handle)
1083{
1084 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1085 int r;
1086
1087 switch (adev->asic_type) {
1088 case CHIP_STONEY:
1089 adev->sdma.num_instances = 1;
1090 break;
1091 default:
1092 adev->sdma.num_instances = SDMA_MAX_INSTANCE;
1093 break;
1094 }
1095
1096 r = sdma_v3_0_init_microcode(adev);
1097 if (r)
1098 return r;
1099
1100 sdma_v3_0_set_ring_funcs(adev);
1101 sdma_v3_0_set_buffer_funcs(adev);
1102 sdma_v3_0_set_vm_pte_funcs(adev);
1103 sdma_v3_0_set_irq_funcs(adev);
1104
1105 return 0;
1106}
1107
1108static int sdma_v3_0_sw_init(void *handle)
1109{
1110 struct amdgpu_ring *ring;
1111 int r, i;
1112 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1113
1114 /* SDMA trap event */
1115 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
1116 &adev->sdma.trap_irq);
1117 if (r)
1118 return r;
1119
1120 /* SDMA Privileged inst */
1121 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
1122 &adev->sdma.illegal_inst_irq);
1123 if (r)
1124 return r;
1125
1126 /* SDMA SRBM write (privileged register access) */
1127 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
1128 &adev->sdma.illegal_inst_irq);
1129 if (r)
1130 return r;
1131
1132 for (i = 0; i < adev->sdma.num_instances; i++) {
1133 ring = &adev->sdma.instance[i].ring;
1134 ring->ring_obj = NULL;
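		/* Bare-metal rings use a doorbell for the wptr; SR-IOV VFs
		 * poll a wptr copy in memory instead.
		 */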
1135 if (!amdgpu_sriov_vf(adev)) {
1136 ring->use_doorbell = true;
1137 ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
1138 } else {
1139 ring->use_pollmem = true;
1140 }
1141
1142 sprintf(ring->name, "sdma%d", i);
1143 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1144 (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
1145 AMDGPU_SDMA_IRQ_INSTANCE1,
1146 AMDGPU_RING_PRIO_DEFAULT, NULL);
1147 if (r)
1148 return r;
1149 }
1150
1151 return r;
1152}
1153
1154static int sdma_v3_0_sw_fini(void *handle)
1155{
1156 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1157 int i;
1158
1159 for (i = 0; i < adev->sdma.num_instances; i++)
1160 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1161
1162 sdma_v3_0_free_microcode(adev);
1163 return 0;
1164}
1165
1166static int sdma_v3_0_hw_init(void *handle)
1167{
1168 int r;
1169 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1170
1171 sdma_v3_0_init_golden_registers(adev);
1172
1173 r = sdma_v3_0_start(adev);
1174 if (r)
1175 return r;
1176
1177 return r;
1178}
1179
1180static int sdma_v3_0_hw_fini(void *handle)
1181{
1182 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1183
1184 sdma_v3_0_ctx_switch_enable(adev, false);
1185 sdma_v3_0_enable(adev, false);
1186
1187 return 0;
1188}
1189
1190static int sdma_v3_0_suspend(void *handle)
1191{
1192 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1193
1194 return sdma_v3_0_hw_fini(adev);
1195}
1196
1197static int sdma_v3_0_resume(void *handle)
1198{
1199 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1200
1201 return sdma_v3_0_hw_init(adev);
1202}
1203
1204static bool sdma_v3_0_is_idle(void *handle)
1205{
1206 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1207 u32 tmp = RREG32(mmSRBM_STATUS2);
1208
1209 if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
1210 SRBM_STATUS2__SDMA1_BUSY_MASK))
1211 return false;
1212
1213 return true;
1214}
1215
1216static int sdma_v3_0_wait_for_idle(void *handle)
1217{
1218 unsigned i;
1219 u32 tmp;
1220 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1221
1222 for (i = 0; i < adev->usec_timeout; i++) {
1223 tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1224 SRBM_STATUS2__SDMA1_BUSY_MASK);
1225
1226 if (!tmp)
1227 return 0;
1228 udelay(1);
1229 }
1230 return -ETIMEDOUT;
1231}
1232
1233static bool sdma_v3_0_check_soft_reset(void *handle)
1234{
1235 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1236 u32 srbm_soft_reset = 0;
1237 u32 tmp = RREG32(mmSRBM_STATUS2);
1238
1239 if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
1240 (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
1241 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1242 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1243 }
1244
1245 if (srbm_soft_reset) {
1246 adev->sdma.srbm_soft_reset = srbm_soft_reset;
1247 return true;
1248 } else {
1249 adev->sdma.srbm_soft_reset = 0;
1250 return false;
1251 }
1252}
1253
1254static int sdma_v3_0_pre_soft_reset(void *handle)
1255{
1256 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1257 u32 srbm_soft_reset = 0;
1258
1259 if (!adev->sdma.srbm_soft_reset)
1260 return 0;
1261
1262 srbm_soft_reset = adev->sdma.srbm_soft_reset;
1263
1264 if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
1265 REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
1266 sdma_v3_0_ctx_switch_enable(adev, false);
1267 sdma_v3_0_enable(adev, false);
1268 }
1269
1270 return 0;
1271}
1272
1273static int sdma_v3_0_post_soft_reset(void *handle)
1274{
1275 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1276 u32 srbm_soft_reset = 0;
1277
1278 if (!adev->sdma.srbm_soft_reset)
1279 return 0;
1280
1281 srbm_soft_reset = adev->sdma.srbm_soft_reset;
1282
1283 if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
1284 REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
1285 sdma_v3_0_gfx_resume(adev);
1286 sdma_v3_0_rlc_resume(adev);
1287 }
1288
1289 return 0;
1290}
1291
1292static int sdma_v3_0_soft_reset(void *handle)
1293{
1294 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1295 u32 srbm_soft_reset = 0;
1296 u32 tmp;
1297
1298 if (!adev->sdma.srbm_soft_reset)
1299 return 0;
1300
1301 srbm_soft_reset = adev->sdma.srbm_soft_reset;
1302
1303 if (srbm_soft_reset) {
1304 tmp = RREG32(mmSRBM_SOFT_RESET);
1305 tmp |= srbm_soft_reset;
1306 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1307 WREG32(mmSRBM_SOFT_RESET, tmp);
1308 tmp = RREG32(mmSRBM_SOFT_RESET);
1309
1310 udelay(50);
1311
1312 tmp &= ~srbm_soft_reset;
1313 WREG32(mmSRBM_SOFT_RESET, tmp);
1314 tmp = RREG32(mmSRBM_SOFT_RESET);
1315
1316 /* Wait a little for things to settle down */
1317 udelay(50);
1318 }
1319
1320 return 0;
1321}
1322
1323static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
1324 struct amdgpu_irq_src *source,
1325 unsigned type,
1326 enum amdgpu_interrupt_state state)
1327{
1328 u32 sdma_cntl;
1329
1330 switch (type) {
1331 case AMDGPU_SDMA_IRQ_INSTANCE0:
1332 switch (state) {
1333 case AMDGPU_IRQ_STATE_DISABLE:
1334 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1335 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1336 WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1337 break;
1338 case AMDGPU_IRQ_STATE_ENABLE:
1339 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1340 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1341 WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1342 break;
1343 default:
1344 break;
1345 }
1346 break;
1347 case AMDGPU_SDMA_IRQ_INSTANCE1:
1348 switch (state) {
1349 case AMDGPU_IRQ_STATE_DISABLE:
1350 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1351 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1352 WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1353 break;
1354 case AMDGPU_IRQ_STATE_ENABLE:
1355 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1356 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1357 WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1358 break;
1359 default:
1360 break;
1361 }
1362 break;
1363 default:
1364 break;
1365 }
1366 return 0;
1367}
1368
1369static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
1370 struct amdgpu_irq_src *source,
1371 struct amdgpu_iv_entry *entry)
1372{
1373 u8 instance_id, queue_id;
1374
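	/* ring_id packs the SDMA instance in bits [1:0] and the queue in bits [3:2] */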
1375 instance_id = (entry->ring_id & 0x3) >> 0;
1376 queue_id = (entry->ring_id & 0xc) >> 2;
1377 DRM_DEBUG("IH: SDMA trap\n");
1378 switch (instance_id) {
1379 case 0:
1380 switch (queue_id) {
1381 case 0:
1382 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1383 break;
1384 case 1:
1385 /* XXX compute */
1386 break;
1387 case 2:
1388 /* XXX compute */
1389 break;
1390 }
1391 break;
1392 case 1:
1393 switch (queue_id) {
1394 case 0:
1395 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1396 break;
1397 case 1:
1398 /* XXX compute */
1399 break;
1400 case 2:
1401 /* XXX compute */
1402 break;
1403 }
1404 break;
1405 }
1406 return 0;
1407}
1408
1409static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1410 struct amdgpu_irq_src *source,
1411 struct amdgpu_iv_entry *entry)
1412{
1413 u8 instance_id, queue_id;
1414
1415 DRM_ERROR("Illegal instruction in SDMA command stream\n");
1416 instance_id = (entry->ring_id & 0x3) >> 0;
1417 queue_id = (entry->ring_id & 0xc) >> 2;
1418
1419 if (instance_id <= 1 && queue_id == 0)
1420 drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
1421 return 0;
1422}
1423
1424static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
1425 struct amdgpu_device *adev,
1426 bool enable)
1427{
1428 uint32_t temp, data;
1429 int i;
1430
1431 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1432 for (i = 0; i < adev->sdma.num_instances; i++) {
1433 temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
1434 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1435 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1436 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1437 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1438 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1439 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1440 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1441 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1442 if (data != temp)
1443 WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
1444 }
1445 } else {
1446 for (i = 0; i < adev->sdma.num_instances; i++) {
1447 temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
1448 data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1449 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1450 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1451 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1452 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1453 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1454 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1455 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
1456
1457 if (data != temp)
1458 WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
1459 }
1460 }
1461}
1462
1463static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
1464 struct amdgpu_device *adev,
1465 bool enable)
1466{
1467 uint32_t temp, data;
1468 int i;
1469
1470 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1471 for (i = 0; i < adev->sdma.num_instances; i++) {
1472 temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
1473 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1474
1475 if (temp != data)
1476 WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
1477 }
1478 } else {
1479 for (i = 0; i < adev->sdma.num_instances; i++) {
1480 temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
1481 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1482
1483 if (temp != data)
1484 WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
1485 }
1486 }
1487}
1488
1489static int sdma_v3_0_set_clockgating_state(void *handle,
1490 enum amd_clockgating_state state)
1491{
1492 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1493
1494 if (amdgpu_sriov_vf(adev))
1495 return 0;
1496
1497 switch (adev->asic_type) {
1498 case CHIP_FIJI:
1499 case CHIP_CARRIZO:
1500 case CHIP_STONEY:
1501 sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
1502 state == AMD_CG_STATE_GATE);
1503 sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
1504 state == AMD_CG_STATE_GATE);
1505 break;
1506 default:
1507 break;
1508 }
1509 return 0;
1510}
1511
1512static int sdma_v3_0_set_powergating_state(void *handle,
1513 enum amd_powergating_state state)
1514{
1515 return 0;
1516}
1517
1518static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
1519{
1520 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1521 int data;
1522
1523 if (amdgpu_sriov_vf(adev))
1524 *flags = 0;
1525
1526 /* AMD_CG_SUPPORT_SDMA_MGCG */
1527 data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
1528 if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
1529 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1530
1531 /* AMD_CG_SUPPORT_SDMA_LS */
1532 data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
1533 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1534 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1535}
1536
1537static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
1538 .name = "sdma_v3_0",
1539 .early_init = sdma_v3_0_early_init,
1540 .late_init = NULL,
1541 .sw_init = sdma_v3_0_sw_init,
1542 .sw_fini = sdma_v3_0_sw_fini,
1543 .hw_init = sdma_v3_0_hw_init,
1544 .hw_fini = sdma_v3_0_hw_fini,
1545 .suspend = sdma_v3_0_suspend,
1546 .resume = sdma_v3_0_resume,
1547 .is_idle = sdma_v3_0_is_idle,
1548 .wait_for_idle = sdma_v3_0_wait_for_idle,
1549 .check_soft_reset = sdma_v3_0_check_soft_reset,
1550 .pre_soft_reset = sdma_v3_0_pre_soft_reset,
1551 .post_soft_reset = sdma_v3_0_post_soft_reset,
1552 .soft_reset = sdma_v3_0_soft_reset,
1553 .set_clockgating_state = sdma_v3_0_set_clockgating_state,
1554 .set_powergating_state = sdma_v3_0_set_powergating_state,
1555 .get_clockgating_state = sdma_v3_0_get_clockgating_state,
1556};
1557
1558static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1559 .type = AMDGPU_RING_TYPE_SDMA,
1560 .align_mask = 0xf,
1561 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1562 .support_64bit_ptrs = false,
1563 .secure_submission_supported = true,
1564 .get_rptr = sdma_v3_0_ring_get_rptr,
1565 .get_wptr = sdma_v3_0_ring_get_wptr,
1566 .set_wptr = sdma_v3_0_ring_set_wptr,
1567 .emit_frame_size =
1568 6 + /* sdma_v3_0_ring_emit_hdp_flush */
1569 3 + /* hdp invalidate */
1570 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
1571 VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
1572 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
1573 .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
1574 .emit_ib = sdma_v3_0_ring_emit_ib,
1575 .emit_fence = sdma_v3_0_ring_emit_fence,
1576 .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
1577 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
1578 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
1579 .test_ring = sdma_v3_0_ring_test_ring,
1580 .test_ib = sdma_v3_0_ring_test_ib,
1581 .insert_nop = sdma_v3_0_ring_insert_nop,
1582 .pad_ib = sdma_v3_0_ring_pad_ib,
1583 .emit_wreg = sdma_v3_0_ring_emit_wreg,
1584};
1585
1586static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
1587{
1588 int i;
1589
1590 for (i = 0; i < adev->sdma.num_instances; i++) {
1591 adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
1592 adev->sdma.instance[i].ring.me = i;
1593 }
1594}
1595
1596static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
1597 .set = sdma_v3_0_set_trap_irq_state,
1598 .process = sdma_v3_0_process_trap_irq,
1599};
1600
1601static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
1602 .process = sdma_v3_0_process_illegal_inst_irq,
1603};
1604
1605static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
1606{
1607 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1608 adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
1609 adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
1610}
1611
1612/**
1613 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
1614 *
1615 * @ib: indirect buffer to copy to
1616 * @src_offset: src GPU address
1617 * @dst_offset: dst GPU address
1618 * @byte_count: number of bytes to xfer
1619 * @tmz: unused
1620 *
1621 * Copy GPU buffers using the DMA engine (VI).
1622 * Used by the amdgpu ttm implementation to move pages if
1623 * registered as the asic copy callback.
1624 */
1625static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
1626 uint64_t src_offset,
1627 uint64_t dst_offset,
1628 uint32_t byte_count,
1629 bool tmz)
1630{
1631 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1632 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1633 ib->ptr[ib->length_dw++] = byte_count;
1634 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1635 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1636 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1637 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1638 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1639}
1640
1641/**
1642 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
1643 *
1644 * @ib: indirect buffer to copy to
1645 * @src_data: value to write to buffer
1646 * @dst_offset: dst GPU address
1647 * @byte_count: number of bytes to xfer
1648 *
1649 * Fill GPU buffers using the DMA engine (VI).
1650 */
1651static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
1652 uint32_t src_data,
1653 uint64_t dst_offset,
1654 uint32_t byte_count)
1655{
1656 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1657 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1658 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1659 ib->ptr[ib->length_dw++] = src_data;
1660 ib->ptr[ib->length_dw++] = byte_count;
1661}
1662
1663static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
1664 .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
1665 .copy_num_dw = 7,
1666 .emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
1667
1668 .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
1669 .fill_num_dw = 5,
1670 .emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
1671};
1672
1673static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
1674{
1675 adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
1676 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1677}
1678
1679static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
1680 .copy_pte_num_dw = 7,
1681 .copy_pte = sdma_v3_0_vm_copy_pte,
1682
1683 .write_pte = sdma_v3_0_vm_write_pte,
1684 .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
1685};
1686
1687static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1688{
1689 unsigned i;
1690
1691 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
1692 for (i = 0; i < adev->sdma.num_instances; i++) {
1693 adev->vm_manager.vm_pte_scheds[i] =
1694 &adev->sdma.instance[i].ring.sched;
1695 }
1696 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
1697}
1698
1699const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
1700{
1701 .type = AMD_IP_BLOCK_TYPE_SDMA,
1702 .major = 3,
1703 .minor = 0,
1704 .rev = 0,
1705 .funcs = &sdma_v3_0_ip_funcs,
1706};
1707
1708const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
1709{
1710 .type = AMD_IP_BLOCK_TYPE_SDMA,
1711 .major = 3,
1712 .minor = 1,
1713 .rev = 0,
1714 .funcs = &sdma_v3_0_ip_funcs,
1715};
184 * packet format that is different from the PM4 format
185 * used by the CP. sDMA supports copying data, writing
186 * embedded data, solid fills, and a number of other
187 * things. It also has support for tiling/detiling of
188 * buffers.
189 */
190
191static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
192{
193 switch (adev->asic_type) {
194 case CHIP_FIJI:
195 amdgpu_device_program_register_sequence(adev,
196 fiji_mgcg_cgcg_init,
197 ARRAY_SIZE(fiji_mgcg_cgcg_init));
198 amdgpu_device_program_register_sequence(adev,
199 golden_settings_fiji_a10,
200 ARRAY_SIZE(golden_settings_fiji_a10));
201 break;
202 case CHIP_TONGA:
203 amdgpu_device_program_register_sequence(adev,
204 tonga_mgcg_cgcg_init,
205 ARRAY_SIZE(tonga_mgcg_cgcg_init));
206 amdgpu_device_program_register_sequence(adev,
207 golden_settings_tonga_a11,
208 ARRAY_SIZE(golden_settings_tonga_a11));
209 break;
210 case CHIP_POLARIS11:
211 case CHIP_POLARIS12:
212 amdgpu_device_program_register_sequence(adev,
213 golden_settings_polaris11_a11,
214 ARRAY_SIZE(golden_settings_polaris11_a11));
215 break;
216 case CHIP_POLARIS10:
217 amdgpu_device_program_register_sequence(adev,
218 golden_settings_polaris10_a11,
219 ARRAY_SIZE(golden_settings_polaris10_a11));
220 break;
221 case CHIP_CARRIZO:
222 amdgpu_device_program_register_sequence(adev,
223 cz_mgcg_cgcg_init,
224 ARRAY_SIZE(cz_mgcg_cgcg_init));
225 amdgpu_device_program_register_sequence(adev,
226 cz_golden_settings_a11,
227 ARRAY_SIZE(cz_golden_settings_a11));
228 break;
229 case CHIP_STONEY:
230 amdgpu_device_program_register_sequence(adev,
231 stoney_mgcg_cgcg_init,
232 ARRAY_SIZE(stoney_mgcg_cgcg_init));
233 amdgpu_device_program_register_sequence(adev,
234 stoney_golden_settings_a11,
235 ARRAY_SIZE(stoney_golden_settings_a11));
236 break;
237 default:
238 break;
239 }
240}
241
242static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
243{
244 int i;
245 for (i = 0; i < adev->sdma.num_instances; i++) {
246 release_firmware(adev->sdma.instance[i].fw);
247 adev->sdma.instance[i].fw = NULL;
248 }
249}
250
251/**
252 * sdma_v3_0_init_microcode - load ucode images from disk
253 *
254 * @adev: amdgpu_device pointer
255 *
256 * Use the firmware interface to load the ucode images into
257 * the driver (not loaded into hw).
258 * Returns 0 on success, error on failure.
259 */
260static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
261{
262 const char *chip_name;
263 char fw_name[30];
264 int err = 0, i;
265 struct amdgpu_firmware_info *info = NULL;
266 const struct common_firmware_header *header = NULL;
267 const struct sdma_firmware_header_v1_0 *hdr;
268
269 DRM_DEBUG("\n");
270
271 switch (adev->asic_type) {
272 case CHIP_TONGA:
273 chip_name = "tonga";
274 break;
275 case CHIP_FIJI:
276 chip_name = "fiji";
277 break;
278 case CHIP_POLARIS11:
279 chip_name = "polaris11";
280 break;
281 case CHIP_POLARIS10:
282 chip_name = "polaris10";
283 break;
284 case CHIP_POLARIS12:
285 chip_name = "polaris12";
286 break;
287 case CHIP_CARRIZO:
288 chip_name = "carrizo";
289 break;
290 case CHIP_STONEY:
291 chip_name = "stoney";
292 break;
293 default: BUG();
294 }
295
296 for (i = 0; i < adev->sdma.num_instances; i++) {
297 if (i == 0)
298 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
299 else
300 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
301 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
302 if (err)
303 goto out;
304 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
305 if (err)
306 goto out;
307 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
308 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
309 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
310 if (adev->sdma.instance[i].feature_version >= 20)
311 adev->sdma.instance[i].burst_nop = true;
312
313 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
314 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
315 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
316 info->fw = adev->sdma.instance[i].fw;
317 header = (const struct common_firmware_header *)info->fw->data;
318 adev->firmware.fw_size +=
319 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
320 }
321 }
322out:
323 if (err) {
324 pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
325 for (i = 0; i < adev->sdma.num_instances; i++) {
326 release_firmware(adev->sdma.instance[i].fw);
327 adev->sdma.instance[i].fw = NULL;
328 }
329 }
330 return err;
331}
332
333/**
334 * sdma_v3_0_ring_get_rptr - get the current read pointer
335 *
336 * @ring: amdgpu ring pointer
337 *
338 * Get the current rptr from the hardware (VI+).
339 */
340static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
341{
342 /* XXX check if swapping is necessary on BE */
343 return ring->adev->wb.wb[ring->rptr_offs] >> 2;
344}
345
346/**
347 * sdma_v3_0_ring_get_wptr - get the current write pointer
348 *
349 * @ring: amdgpu ring pointer
350 *
351 * Get the current wptr from the hardware (VI+).
352 */
353static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
354{
355 struct amdgpu_device *adev = ring->adev;
356 u32 wptr;
357
358 if (ring->use_doorbell || ring->use_pollmem) {
359 /* XXX check if swapping is necessary on BE */
360 wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
361 } else {
362 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
363
364 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
365 }
366
367 return wptr;
368}
369
370/**
371 * sdma_v3_0_ring_set_wptr - commit the write pointer
372 *
373 * @ring: amdgpu ring pointer
374 *
375 * Write the wptr back to the hardware (VI+).
376 */
377static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
378{
379 struct amdgpu_device *adev = ring->adev;
380
381 if (ring->use_doorbell) {
382 u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
383 /* XXX check if swapping is necessary on BE */
384 WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
385 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
386 } else if (ring->use_pollmem) {
387 u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
388
389 WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
390 } else {
391 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
392
393 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
394 }
395}
396
397static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
398{
399 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
400 int i;
401
402 for (i = 0; i < count; i++)
403 if (sdma && sdma->burst_nop && (i == 0))
404 amdgpu_ring_write(ring, ring->funcs->nop |
405 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
406 else
407 amdgpu_ring_write(ring, ring->funcs->nop);
408}
409
410/**
411 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
412 *
413 * @ring: amdgpu ring pointer
414 * @ib: IB object to schedule
415 *
416 * Schedule an IB in the DMA ring (VI).
417 */
418static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
419 struct amdgpu_ib *ib,
420 unsigned vmid, bool ctx_switch)
421{
422 /* IB packet must end on a 8 DW boundary */
423 sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
424
425 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
426 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
427 /* base must be 32 byte aligned */
428 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
429 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
430 amdgpu_ring_write(ring, ib->length_dw);
431 amdgpu_ring_write(ring, 0);
432 amdgpu_ring_write(ring, 0);
433
434}
435
436/**
437 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
438 *
439 * @ring: amdgpu ring pointer
440 *
441 * Emit an hdp flush packet on the requested DMA ring.
442 */
443static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
444{
445 u32 ref_and_mask = 0;
446
447 if (ring == &ring->adev->sdma.instance[0].ring)
448 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
449 else
450 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
451
452 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
453 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
454 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
455 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
456 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
457 amdgpu_ring_write(ring, ref_and_mask); /* reference */
458 amdgpu_ring_write(ring, ref_and_mask); /* mask */
459 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
460 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
461}
462
463/**
464 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
465 *
466 * @ring: amdgpu ring pointer
467 * @fence: amdgpu fence object
468 *
469 * Add a DMA fence packet to the ring to write
470 * the fence seq number and DMA trap packet to generate
471 * an interrupt if needed (VI).
472 */
473static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
474 unsigned flags)
475{
476 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
477 /* write the fence */
478 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
479 amdgpu_ring_write(ring, lower_32_bits(addr));
480 amdgpu_ring_write(ring, upper_32_bits(addr));
481 amdgpu_ring_write(ring, lower_32_bits(seq));
482
483 /* optionally write high bits as well */
484 if (write64bit) {
485 addr += 4;
486 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
487 amdgpu_ring_write(ring, lower_32_bits(addr));
488 amdgpu_ring_write(ring, upper_32_bits(addr));
489 amdgpu_ring_write(ring, upper_32_bits(seq));
490 }
491
492 /* generate an interrupt */
493 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
494 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
495}
496
497/**
498 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
499 *
500 * @adev: amdgpu_device pointer
501 *
502 * Stop the gfx async dma ring buffers (VI).
503 */
504static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
505{
506 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
507 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
508 u32 rb_cntl, ib_cntl;
509 int i;
510
511 if ((adev->mman.buffer_funcs_ring == sdma0) ||
512 (adev->mman.buffer_funcs_ring == sdma1))
513 amdgpu_ttm_set_buffer_funcs_status(adev, false);
514
515 for (i = 0; i < adev->sdma.num_instances; i++) {
516 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
517 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
518 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
519 ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
520 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
521 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
522 }
523 sdma0->ready = false;
524 sdma1->ready = false;
525}
526
527/**
528 * sdma_v3_0_rlc_stop - stop the compute async dma engines
529 *
530 * @adev: amdgpu_device pointer
531 *
532 * Stop the compute async dma queues (VI).
533 */
534static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
535{
536 /* XXX todo */
537}
538
539/**
540 * sdma_v3_0_ctx_switch_enable - stop the async dma engines context switch
541 *
542 * @adev: amdgpu_device pointer
543 * @enable: enable/disable the DMA MEs context switch.
544 *
545 * Halt or unhalt the async dma engines context switch (VI).
546 */
547static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
548{
549 u32 f32_cntl, phase_quantum = 0;
550 int i;
551
552 if (amdgpu_sdma_phase_quantum) {
553 unsigned value = amdgpu_sdma_phase_quantum;
554 unsigned unit = 0;
555
556 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
557 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
558 value = (value + 1) >> 1;
559 unit++;
560 }
561 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
562 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
563 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
564 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
565 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
566 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
567 WARN_ONCE(1,
568 "clamping sdma_phase_quantum to %uK clock cycles\n",
569 value << unit);
570 }
571 phase_quantum =
572 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
573 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
574 }
575
576 for (i = 0; i < adev->sdma.num_instances; i++) {
577 f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
578 if (enable) {
579 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
580 AUTO_CTXSW_ENABLE, 1);
581 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
582 ATC_L1_ENABLE, 1);
583 if (amdgpu_sdma_phase_quantum) {
584 WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
585 phase_quantum);
586 WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
587 phase_quantum);
588 }
589 } else {
590 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
591 AUTO_CTXSW_ENABLE, 0);
592 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
593 ATC_L1_ENABLE, 1);
594 }
595
596 WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
597 }
598}
599
600/**
601 * sdma_v3_0_enable - stop the async dma engines
602 *
603 * @adev: amdgpu_device pointer
604 * @enable: enable/disable the DMA MEs.
605 *
606 * Halt or unhalt the async dma engines (VI).
607 */
608static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
609{
610 u32 f32_cntl;
611 int i;
612
613 if (!enable) {
614 sdma_v3_0_gfx_stop(adev);
615 sdma_v3_0_rlc_stop(adev);
616 }
617
618 for (i = 0; i < adev->sdma.num_instances; i++) {
619 f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
620 if (enable)
621 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
622 else
623 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
624 WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
625 }
626}
627
628/**
629 * sdma_v3_0_gfx_resume - setup and start the async dma engines
630 *
631 * @adev: amdgpu_device pointer
632 *
633 * Set up the gfx DMA ring buffers and enable them (VI).
634 * Returns 0 for success, error for failure.
635 */
636static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
637{
638 struct amdgpu_ring *ring;
639 u32 rb_cntl, ib_cntl, wptr_poll_cntl;
640 u32 rb_bufsz;
641 u32 wb_offset;
642 u32 doorbell;
643 u64 wptr_gpu_addr;
644 int i, j, r;
645
646 for (i = 0; i < adev->sdma.num_instances; i++) {
647 ring = &adev->sdma.instance[i].ring;
648 amdgpu_ring_clear_ring(ring);
649 wb_offset = (ring->rptr_offs * 4);
650
651 mutex_lock(&adev->srbm_mutex);
652 for (j = 0; j < 16; j++) {
653 vi_srbm_select(adev, 0, 0, 0, j);
654 /* SDMA GFX */
655 WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
656 WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
657 }
658 vi_srbm_select(adev, 0, 0, 0, 0);
659 mutex_unlock(&adev->srbm_mutex);
660
661 WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
662 adev->gfx.config.gb_addr_config & 0x70);
663
664 WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
665
666 /* Set ring buffer size in dwords */
667 rb_bufsz = order_base_2(ring->ring_size / 4);
668 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
669 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
670#ifdef __BIG_ENDIAN
671 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
672 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
673 RPTR_WRITEBACK_SWAP_ENABLE, 1);
674#endif
675 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
676
677 /* Initialize the ring buffer's read and write pointers */
678 ring->wptr = 0;
679 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
680 sdma_v3_0_ring_set_wptr(ring);
681 WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
682 WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
683
684 /* set the wb address whether it's enabled or not */
685 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
686 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
687 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
688 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
689
690 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
691
692 WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
693 WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
694
695 doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);
696
697 if (ring->use_doorbell) {
698 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
699 OFFSET, ring->doorbell_index);
700 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
701 } else {
702 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
703 }
704 WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
705
706 /* setup the wptr shadow polling */
707 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
708
709 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
710 lower_32_bits(wptr_gpu_addr));
711 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
712 upper_32_bits(wptr_gpu_addr));
713 wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
714 if (ring->use_pollmem) {
715 /*wptr polling is not enogh fast, directly clean the wptr register */
716 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
717 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
718 SDMA0_GFX_RB_WPTR_POLL_CNTL,
719 ENABLE, 1);
720 } else {
721 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
722 SDMA0_GFX_RB_WPTR_POLL_CNTL,
723 ENABLE, 0);
724 }
725 WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
726
727 /* enable DMA RB */
728 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
729 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
730
731 ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
732 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
733#ifdef __BIG_ENDIAN
734 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
735#endif
736 /* enable DMA IBs */
737 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
738
739 ring->ready = true;
740 }
741
742 /* unhalt the MEs */
743 sdma_v3_0_enable(adev, true);
744 /* enable sdma ring preemption */
745 sdma_v3_0_ctx_switch_enable(adev, true);
746
747 for (i = 0; i < adev->sdma.num_instances; i++) {
748 ring = &adev->sdma.instance[i].ring;
749 r = amdgpu_ring_test_ring(ring);
750 if (r) {
751 ring->ready = false;
752 return r;
753 }
754
755 if (adev->mman.buffer_funcs_ring == ring)
756 amdgpu_ttm_set_buffer_funcs_status(adev, true);
757 }
758
759 return 0;
760}
761
762/**
763 * sdma_v3_0_rlc_resume - setup and start the async dma engines
764 *
765 * @adev: amdgpu_device pointer
766 *
767 * Set up the compute DMA queues and enable them (VI).
768 * Returns 0 for success, error for failure.
769 */
770static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
771{
772 /* XXX todo */
773 return 0;
774}
775
776/**
777 * sdma_v3_0_load_microcode - load the sDMA ME ucode
778 *
779 * @adev: amdgpu_device pointer
780 *
781 * Loads the sDMA0/1 ucode.
782 * Returns 0 for success, -EINVAL if the ucode is not available.
783 */
784static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
785{
786 const struct sdma_firmware_header_v1_0 *hdr;
787 const __le32 *fw_data;
788 u32 fw_size;
789 int i, j;
790
791 /* halt the MEs */
792 sdma_v3_0_enable(adev, false);
793
794 for (i = 0; i < adev->sdma.num_instances; i++) {
795 if (!adev->sdma.instance[i].fw)
796 return -EINVAL;
797 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
798 amdgpu_ucode_print_sdma_hdr(&hdr->header);
799 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
800 fw_data = (const __le32 *)
801 (adev->sdma.instance[i].fw->data +
802 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
803 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
804 for (j = 0; j < fw_size; j++)
805 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
806 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
807 }
808
809 return 0;
810}
811
812/**
813 * sdma_v3_0_start - setup and start the async dma engines
814 *
815 * @adev: amdgpu_device pointer
816 *
817 * Set up the DMA engines and enable them (VI).
818 * Returns 0 for success, error for failure.
819 */
820static int sdma_v3_0_start(struct amdgpu_device *adev)
821{
822 int r;
823
824 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
825 r = sdma_v3_0_load_microcode(adev);
826 if (r)
827 return r;
828 }
829
830 /* disable sdma engine before programing it */
831 sdma_v3_0_ctx_switch_enable(adev, false);
832 sdma_v3_0_enable(adev, false);
833
834 /* start the gfx rings and rlc compute queues */
835 r = sdma_v3_0_gfx_resume(adev);
836 if (r)
837 return r;
838 r = sdma_v3_0_rlc_resume(adev);
839 if (r)
840 return r;
841
842 return 0;
843}
844
845/**
846 * sdma_v3_0_ring_test_ring - simple async dma engine test
847 *
848 * @ring: amdgpu_ring structure holding ring information
849 *
850 * Test the DMA engine by writing using it to write an
851 * value to memory. (VI).
852 * Returns 0 for success, error for failure.
853 */
854static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
855{
856 struct amdgpu_device *adev = ring->adev;
857 unsigned i;
858 unsigned index;
859 int r;
860 u32 tmp;
861 u64 gpu_addr;
862
863 r = amdgpu_device_wb_get(adev, &index);
864 if (r) {
865 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
866 return r;
867 }
868
869 gpu_addr = adev->wb.gpu_addr + (index * 4);
870 tmp = 0xCAFEDEAD;
871 adev->wb.wb[index] = cpu_to_le32(tmp);
872
873 r = amdgpu_ring_alloc(ring, 5);
874 if (r) {
875 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
876 amdgpu_device_wb_free(adev, index);
877 return r;
878 }
879
880 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
881 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
882 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
883 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
884 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
885 amdgpu_ring_write(ring, 0xDEADBEEF);
886 amdgpu_ring_commit(ring);
887
888 for (i = 0; i < adev->usec_timeout; i++) {
889 tmp = le32_to_cpu(adev->wb.wb[index]);
890 if (tmp == 0xDEADBEEF)
891 break;
892 DRM_UDELAY(1);
893 }
894
895 if (i < adev->usec_timeout) {
896 DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
897 } else {
898 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
899 ring->idx, tmp);
900 r = -EINVAL;
901 }
902 amdgpu_device_wb_free(adev, index);
903
904 return r;
905}
906
907/**
908 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
909 *
910 * @ring: amdgpu_ring structure holding ring information
911 *
912 * Test a simple IB in the DMA ring (VI).
913 * Returns 0 on success, error on failure.
914 */
915static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
916{
917 struct amdgpu_device *adev = ring->adev;
918 struct amdgpu_ib ib;
919 struct dma_fence *f = NULL;
920 unsigned index;
921 u32 tmp = 0;
922 u64 gpu_addr;
923 long r;
924
925 r = amdgpu_device_wb_get(adev, &index);
926 if (r) {
927 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
928 return r;
929 }
930
931 gpu_addr = adev->wb.gpu_addr + (index * 4);
932 tmp = 0xCAFEDEAD;
933 adev->wb.wb[index] = cpu_to_le32(tmp);
934 memset(&ib, 0, sizeof(ib));
935 r = amdgpu_ib_get(adev, NULL, 256, &ib);
936 if (r) {
937 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
938 goto err0;
939 }
940
941 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
942 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
943 ib.ptr[1] = lower_32_bits(gpu_addr);
944 ib.ptr[2] = upper_32_bits(gpu_addr);
945 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
946 ib.ptr[4] = 0xDEADBEEF;
947 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
948 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
949 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
950 ib.length_dw = 8;
951
952 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
953 if (r)
954 goto err1;
955
956 r = dma_fence_wait_timeout(f, false, timeout);
957 if (r == 0) {
958 DRM_ERROR("amdgpu: IB test timed out\n");
959 r = -ETIMEDOUT;
960 goto err1;
961 } else if (r < 0) {
962 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
963 goto err1;
964 }
965 tmp = le32_to_cpu(adev->wb.wb[index]);
966 if (tmp == 0xDEADBEEF) {
967 DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
968 r = 0;
969 } else {
970 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
971 r = -EINVAL;
972 }
973err1:
974 amdgpu_ib_free(adev, &ib, NULL);
975 dma_fence_put(f);
976err0:
977 amdgpu_device_wb_free(adev, index);
978 return r;
979}
980
981/**
982 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
983 *
984 * @ib: indirect buffer to fill with commands
985 * @pe: addr of the page entry
986 * @src: src addr to copy from
987 * @count: number of page entries to update
988 *
989 * Update PTEs by copying them from the GART using sDMA (CIK).
990 */
991static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
992 uint64_t pe, uint64_t src,
993 unsigned count)
994{
995 unsigned bytes = count * 8;
996
997 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
998 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
999 ib->ptr[ib->length_dw++] = bytes;
1000 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1001 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1002 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1003 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1004 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1005}
1006
1007/**
1008 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
1009 *
1010 * @ib: indirect buffer to fill with commands
1011 * @pe: addr of the page entry
1012 * @value: dst addr to write into pe
1013 * @count: number of page entries to update
1014 * @incr: increase next addr by incr bytes
1015 *
1016 * Update PTEs by writing them manually using sDMA (CIK).
1017 */
1018static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1019 uint64_t value, unsigned count,
1020 uint32_t incr)
1021{
1022 unsigned ndw = count * 2;
1023
1024 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1025 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1026 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1027 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1028 ib->ptr[ib->length_dw++] = ndw;
1029 for (; ndw > 0; ndw -= 2) {
1030 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1031 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1032 value += incr;
1033 }
1034}
1035
1036/**
1037 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
1038 *
1039 * @ib: indirect buffer to fill with commands
1040 * @pe: addr of the page entry
1041 * @addr: dst addr to write into pe
1042 * @count: number of page entries to update
1043 * @incr: increase next addr by incr bytes
1044 * @flags: access flags
1045 *
1046 * Update the page tables using sDMA (CIK).
1047 */
1048static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
1049 uint64_t addr, unsigned count,
1050 uint32_t incr, uint64_t flags)
1051{
1052 /* for physically contiguous pages (vram) */
1053 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
1054 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1055 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1056 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1057 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1058 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1059 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1060 ib->ptr[ib->length_dw++] = incr; /* increment size */
1061 ib->ptr[ib->length_dw++] = 0;
1062 ib->ptr[ib->length_dw++] = count; /* number of entries */
1063}
1064
1065/**
1066 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
1067 *
1068 * @ib: indirect buffer to fill with padding
1069 *
1070 */
1071static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1072{
1073 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
1074 u32 pad_count;
1075 int i;
1076
1077 pad_count = (8 - (ib->length_dw & 0x7)) % 8;
1078 for (i = 0; i < pad_count; i++)
1079 if (sdma && sdma->burst_nop && (i == 0))
1080 ib->ptr[ib->length_dw++] =
1081 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1082 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1083 else
1084 ib->ptr[ib->length_dw++] =
1085 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1086}
1087
1088/**
1089 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
1090 *
1091 * @ring: amdgpu_ring pointer
1092 *
1093 * Make sure all previous operations are completed (CIK).
1094 */
1095static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1096{
1097 uint32_t seq = ring->fence_drv.sync_seq;
1098 uint64_t addr = ring->fence_drv.gpu_addr;
1099
1100 /* wait for idle */
1101 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1102 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1103 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1104 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1105 amdgpu_ring_write(ring, addr & 0xfffffffc);
1106 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1107 amdgpu_ring_write(ring, seq); /* reference */
1108 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1109 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1110 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1111}
1112
1113/**
1114 * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
1115 *
1116 * @ring: amdgpu_ring pointer
1117 * @vm: amdgpu_vm pointer
1118 *
1119 * Update the page table base and flush the VM TLB
1120 * using sDMA (VI).
1121 */
1122static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1123 unsigned vmid, uint64_t pd_addr)
1124{
1125 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1126
1127 /* wait for flush */
1128 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1129 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1130 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
1131 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
1132 amdgpu_ring_write(ring, 0);
1133 amdgpu_ring_write(ring, 0); /* reference */
1134 amdgpu_ring_write(ring, 0); /* mask */
1135 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1136 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
1137}
1138
1139static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
1140 uint32_t reg, uint32_t val)
1141{
1142 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1143 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1144 amdgpu_ring_write(ring, reg);
1145 amdgpu_ring_write(ring, val);
1146}
1147
1148static int sdma_v3_0_early_init(void *handle)
1149{
1150 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1151
1152 switch (adev->asic_type) {
1153 case CHIP_STONEY:
1154 adev->sdma.num_instances = 1;
1155 break;
1156 default:
1157 adev->sdma.num_instances = SDMA_MAX_INSTANCE;
1158 break;
1159 }
1160
1161 sdma_v3_0_set_ring_funcs(adev);
1162 sdma_v3_0_set_buffer_funcs(adev);
1163 sdma_v3_0_set_vm_pte_funcs(adev);
1164 sdma_v3_0_set_irq_funcs(adev);
1165
1166 return 0;
1167}
1168
1169static int sdma_v3_0_sw_init(void *handle)
1170{
1171 struct amdgpu_ring *ring;
1172 int r, i;
1173 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1174
1175 /* SDMA trap event */
1176 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
1177 &adev->sdma.trap_irq);
1178 if (r)
1179 return r;
1180
1181 /* SDMA Privileged inst */
1182 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
1183 &adev->sdma.illegal_inst_irq);
1184 if (r)
1185 return r;
1186
1187 /* SDMA Privileged inst */
1188 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
1189 &adev->sdma.illegal_inst_irq);
1190 if (r)
1191 return r;
1192
1193 r = sdma_v3_0_init_microcode(adev);
1194 if (r) {
1195 DRM_ERROR("Failed to load sdma firmware!\n");
1196 return r;
1197 }
1198
1199 for (i = 0; i < adev->sdma.num_instances; i++) {
1200 ring = &adev->sdma.instance[i].ring;
1201 ring->ring_obj = NULL;
1202 if (!amdgpu_sriov_vf(adev)) {
1203 ring->use_doorbell = true;
1204 ring->doorbell_index = (i == 0) ?
1205 AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
1206 } else {
1207 ring->use_pollmem = true;
1208 }
1209
1210 sprintf(ring->name, "sdma%d", i);
1211 r = amdgpu_ring_init(adev, ring, 1024,
1212 &adev->sdma.trap_irq,
1213 (i == 0) ?
1214 AMDGPU_SDMA_IRQ_TRAP0 :
1215 AMDGPU_SDMA_IRQ_TRAP1);
1216 if (r)
1217 return r;
1218 }
1219
1220 return r;
1221}
1222
1223static int sdma_v3_0_sw_fini(void *handle)
1224{
1225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1226 int i;
1227
1228 for (i = 0; i < adev->sdma.num_instances; i++)
1229 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1230
1231 sdma_v3_0_free_microcode(adev);
1232 return 0;
1233}
1234
1235static int sdma_v3_0_hw_init(void *handle)
1236{
1237 int r;
1238 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1239
1240 sdma_v3_0_init_golden_registers(adev);
1241
1242 r = sdma_v3_0_start(adev);
1243 if (r)
1244 return r;
1245
1246 return r;
1247}
1248
1249static int sdma_v3_0_hw_fini(void *handle)
1250{
1251 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1252
1253 sdma_v3_0_ctx_switch_enable(adev, false);
1254 sdma_v3_0_enable(adev, false);
1255
1256 return 0;
1257}
1258
1259static int sdma_v3_0_suspend(void *handle)
1260{
1261 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1262
1263 return sdma_v3_0_hw_fini(adev);
1264}
1265
1266static int sdma_v3_0_resume(void *handle)
1267{
1268 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1269
1270 return sdma_v3_0_hw_init(adev);
1271}
1272
1273static bool sdma_v3_0_is_idle(void *handle)
1274{
1275 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1276 u32 tmp = RREG32(mmSRBM_STATUS2);
1277
1278 if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
1279 SRBM_STATUS2__SDMA1_BUSY_MASK))
1280 return false;
1281
1282 return true;
1283}
1284
1285static int sdma_v3_0_wait_for_idle(void *handle)
1286{
1287 unsigned i;
1288 u32 tmp;
1289 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1290
1291 for (i = 0; i < adev->usec_timeout; i++) {
1292 tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1293 SRBM_STATUS2__SDMA1_BUSY_MASK);
1294
1295 if (!tmp)
1296 return 0;
1297 udelay(1);
1298 }
1299 return -ETIMEDOUT;
1300}
1301
1302static bool sdma_v3_0_check_soft_reset(void *handle)
1303{
1304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 u32 srbm_soft_reset = 0;
1306 u32 tmp = RREG32(mmSRBM_STATUS2);
1307
1308 if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
1309 (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
1310 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1311 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1312 }
1313
1314 if (srbm_soft_reset) {
1315 adev->sdma.srbm_soft_reset = srbm_soft_reset;
1316 return true;
1317 } else {
1318 adev->sdma.srbm_soft_reset = 0;
1319 return false;
1320 }
1321}
1322
1323static int sdma_v3_0_pre_soft_reset(void *handle)
1324{
1325 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1326 u32 srbm_soft_reset = 0;
1327
1328 if (!adev->sdma.srbm_soft_reset)
1329 return 0;
1330
1331 srbm_soft_reset = adev->sdma.srbm_soft_reset;
1332
1333 if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
1334 REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
1335 sdma_v3_0_ctx_switch_enable(adev, false);
1336 sdma_v3_0_enable(adev, false);
1337 }
1338
1339 return 0;
1340}
1341
1342static int sdma_v3_0_post_soft_reset(void *handle)
1343{
1344 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1345 u32 srbm_soft_reset = 0;
1346
1347 if (!adev->sdma.srbm_soft_reset)
1348 return 0;
1349
1350 srbm_soft_reset = adev->sdma.srbm_soft_reset;
1351
1352 if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
1353 REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
1354 sdma_v3_0_gfx_resume(adev);
1355 sdma_v3_0_rlc_resume(adev);
1356 }
1357
1358 return 0;
1359}
1360
1361static int sdma_v3_0_soft_reset(void *handle)
1362{
1363 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1364 u32 srbm_soft_reset = 0;
1365 u32 tmp;
1366
1367 if (!adev->sdma.srbm_soft_reset)
1368 return 0;
1369
1370 srbm_soft_reset = adev->sdma.srbm_soft_reset;
1371
1372 if (srbm_soft_reset) {
1373 tmp = RREG32(mmSRBM_SOFT_RESET);
1374 tmp |= srbm_soft_reset;
1375 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1376 WREG32(mmSRBM_SOFT_RESET, tmp);
1377 tmp = RREG32(mmSRBM_SOFT_RESET);
1378
1379 udelay(50);
1380
1381 tmp &= ~srbm_soft_reset;
1382 WREG32(mmSRBM_SOFT_RESET, tmp);
1383 tmp = RREG32(mmSRBM_SOFT_RESET);
1384
1385 /* Wait a little for things to settle down */
1386 udelay(50);
1387 }
1388
1389 return 0;
1390}
1391
1392static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
1393 struct amdgpu_irq_src *source,
1394 unsigned type,
1395 enum amdgpu_interrupt_state state)
1396{
1397 u32 sdma_cntl;
1398
1399 switch (type) {
1400 case AMDGPU_SDMA_IRQ_TRAP0:
1401 switch (state) {
1402 case AMDGPU_IRQ_STATE_DISABLE:
1403 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1404 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1405 WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1406 break;
1407 case AMDGPU_IRQ_STATE_ENABLE:
1408 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1409 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1410 WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1411 break;
1412 default:
1413 break;
1414 }
1415 break;
1416 case AMDGPU_SDMA_IRQ_TRAP1:
1417 switch (state) {
1418 case AMDGPU_IRQ_STATE_DISABLE:
1419 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1420 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1421 WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1422 break;
1423 case AMDGPU_IRQ_STATE_ENABLE:
1424 sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1425 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1426 WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1427 break;
1428 default:
1429 break;
1430 }
1431 break;
1432 default:
1433 break;
1434 }
1435 return 0;
1436}
1437
1438static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
1439 struct amdgpu_irq_src *source,
1440 struct amdgpu_iv_entry *entry)
1441{
1442 u8 instance_id, queue_id;
1443
1444 instance_id = (entry->ring_id & 0x3) >> 0;
1445 queue_id = (entry->ring_id & 0xc) >> 2;
1446 DRM_DEBUG("IH: SDMA trap\n");
1447 switch (instance_id) {
1448 case 0:
1449 switch (queue_id) {
1450 case 0:
1451 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1452 break;
1453 case 1:
1454 /* XXX compute */
1455 break;
1456 case 2:
1457 /* XXX compute */
1458 break;
1459 }
1460 break;
1461 case 1:
1462 switch (queue_id) {
1463 case 0:
1464 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1465 break;
1466 case 1:
1467 /* XXX compute */
1468 break;
1469 case 2:
1470 /* XXX compute */
1471 break;
1472 }
1473 break;
1474 }
1475 return 0;
1476}
1477
1478static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1479 struct amdgpu_irq_src *source,
1480 struct amdgpu_iv_entry *entry)
1481{
1482 DRM_ERROR("Illegal instruction in SDMA command stream\n");
1483 schedule_work(&adev->reset_work);
1484 return 0;
1485}
1486
1487static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
1488 struct amdgpu_device *adev,
1489 bool enable)
1490{
1491 uint32_t temp, data;
1492 int i;
1493
1494 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1495 for (i = 0; i < adev->sdma.num_instances; i++) {
1496 temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
1497 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1498 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1499 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1500 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1501 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1502 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1503 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1504 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1505 if (data != temp)
1506 WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
1507 }
1508 } else {
1509 for (i = 0; i < adev->sdma.num_instances; i++) {
1510 temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
1511 data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1512 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1513 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1514 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1515 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1516 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1517 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1518 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
1519
1520 if (data != temp)
1521 WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
1522 }
1523 }
1524}
1525
1526static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
1527 struct amdgpu_device *adev,
1528 bool enable)
1529{
1530 uint32_t temp, data;
1531 int i;
1532
1533 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1534 for (i = 0; i < adev->sdma.num_instances; i++) {
1535 temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
1536 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1537
1538 if (temp != data)
1539 WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
1540 }
1541 } else {
1542 for (i = 0; i < adev->sdma.num_instances; i++) {
1543 temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
1544 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1545
1546 if (temp != data)
1547 WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
1548 }
1549 }
1550}
1551
1552static int sdma_v3_0_set_clockgating_state(void *handle,
1553 enum amd_clockgating_state state)
1554{
1555 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1556
1557 if (amdgpu_sriov_vf(adev))
1558 return 0;
1559
1560 switch (adev->asic_type) {
1561 case CHIP_FIJI:
1562 case CHIP_CARRIZO:
1563 case CHIP_STONEY:
1564 sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
1565 state == AMD_CG_STATE_GATE);
1566 sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
1567 state == AMD_CG_STATE_GATE);
1568 break;
1569 default:
1570 break;
1571 }
1572 return 0;
1573}
1574
1575static int sdma_v3_0_set_powergating_state(void *handle,
1576 enum amd_powergating_state state)
1577{
1578 return 0;
1579}
1580
1581static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
1582{
1583 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1584 int data;
1585
1586 if (amdgpu_sriov_vf(adev))
1587 *flags = 0;
1588
1589 /* AMD_CG_SUPPORT_SDMA_MGCG */
1590 data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
1591 if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
1592 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1593
1594 /* AMD_CG_SUPPORT_SDMA_LS */
1595 data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
1596 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1597 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1598}
1599
1600static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
1601 .name = "sdma_v3_0",
1602 .early_init = sdma_v3_0_early_init,
1603 .late_init = NULL,
1604 .sw_init = sdma_v3_0_sw_init,
1605 .sw_fini = sdma_v3_0_sw_fini,
1606 .hw_init = sdma_v3_0_hw_init,
1607 .hw_fini = sdma_v3_0_hw_fini,
1608 .suspend = sdma_v3_0_suspend,
1609 .resume = sdma_v3_0_resume,
1610 .is_idle = sdma_v3_0_is_idle,
1611 .wait_for_idle = sdma_v3_0_wait_for_idle,
1612 .check_soft_reset = sdma_v3_0_check_soft_reset,
1613 .pre_soft_reset = sdma_v3_0_pre_soft_reset,
1614 .post_soft_reset = sdma_v3_0_post_soft_reset,
1615 .soft_reset = sdma_v3_0_soft_reset,
1616 .set_clockgating_state = sdma_v3_0_set_clockgating_state,
1617 .set_powergating_state = sdma_v3_0_set_powergating_state,
1618 .get_clockgating_state = sdma_v3_0_get_clockgating_state,
1619};
1620
1621static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1622 .type = AMDGPU_RING_TYPE_SDMA,
1623 .align_mask = 0xf,
1624 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1625 .support_64bit_ptrs = false,
1626 .get_rptr = sdma_v3_0_ring_get_rptr,
1627 .get_wptr = sdma_v3_0_ring_get_wptr,
1628 .set_wptr = sdma_v3_0_ring_set_wptr,
1629 .emit_frame_size =
1630 6 + /* sdma_v3_0_ring_emit_hdp_flush */
1631 3 + /* hdp invalidate */
1632 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
1633 VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
1634 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
1635 .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
1636 .emit_ib = sdma_v3_0_ring_emit_ib,
1637 .emit_fence = sdma_v3_0_ring_emit_fence,
1638 .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
1639 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
1640 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
1641 .test_ring = sdma_v3_0_ring_test_ring,
1642 .test_ib = sdma_v3_0_ring_test_ib,
1643 .insert_nop = sdma_v3_0_ring_insert_nop,
1644 .pad_ib = sdma_v3_0_ring_pad_ib,
1645 .emit_wreg = sdma_v3_0_ring_emit_wreg,
1646};
1647
1648static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
1649{
1650 int i;
1651
1652 for (i = 0; i < adev->sdma.num_instances; i++)
1653 adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
1654}
1655
1656static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
1657 .set = sdma_v3_0_set_trap_irq_state,
1658 .process = sdma_v3_0_process_trap_irq,
1659};
1660
1661static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
1662 .process = sdma_v3_0_process_illegal_inst_irq,
1663};
1664
1665static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
1666{
1667 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1668 adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
1669 adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
1670}
1671
1672/**
1673 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
1674 *
1675 * @ring: amdgpu_ring structure holding ring information
1676 * @src_offset: src GPU address
1677 * @dst_offset: dst GPU address
1678 * @byte_count: number of bytes to xfer
1679 *
1680 * Copy GPU buffers using the DMA engine (VI).
1681 * Used by the amdgpu ttm implementation to move pages if
1682 * registered as the asic copy callback.
1683 */
1684static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
1685 uint64_t src_offset,
1686 uint64_t dst_offset,
1687 uint32_t byte_count)
1688{
1689 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1690 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1691 ib->ptr[ib->length_dw++] = byte_count;
1692 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1693 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1694 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1695 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1696 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1697}
1698
1699/**
1700 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
1701 *
1702 * @ring: amdgpu_ring structure holding ring information
1703 * @src_data: value to write to buffer
1704 * @dst_offset: dst GPU address
1705 * @byte_count: number of bytes to xfer
1706 *
1707 * Fill GPU buffers using the DMA engine (VI).
1708 */
1709static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
1710 uint32_t src_data,
1711 uint64_t dst_offset,
1712 uint32_t byte_count)
1713{
1714 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1715 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1716 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1717 ib->ptr[ib->length_dw++] = src_data;
1718 ib->ptr[ib->length_dw++] = byte_count;
1719}
1720
1721static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
1722 .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
1723 .copy_num_dw = 7,
1724 .emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
1725
1726 .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
1727 .fill_num_dw = 5,
1728 .emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
1729};
1730
1731static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
1732{
1733 if (adev->mman.buffer_funcs == NULL) {
1734 adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
1735 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1736 }
1737}
1738
1739static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
1740 .copy_pte_num_dw = 7,
1741 .copy_pte = sdma_v3_0_vm_copy_pte,
1742
1743 .write_pte = sdma_v3_0_vm_write_pte,
1744 .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
1745};
1746
1747static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1748{
1749 unsigned i;
1750
1751 if (adev->vm_manager.vm_pte_funcs == NULL) {
1752 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
1753 for (i = 0; i < adev->sdma.num_instances; i++)
1754 adev->vm_manager.vm_pte_rings[i] =
1755 &adev->sdma.instance[i].ring;
1756
1757 adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
1758 }
1759}
1760
1761const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
1762{
1763 .type = AMD_IP_BLOCK_TYPE_SDMA,
1764 .major = 3,
1765 .minor = 0,
1766 .rev = 0,
1767 .funcs = &sdma_v3_0_ip_funcs,
1768};
1769
1770const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
1771{
1772 .type = AMD_IP_BLOCK_TYPE_SDMA,
1773 .major = 3,
1774 .minor = 1,
1775 .rev = 0,
1776 .funcs = &sdma_v3_0_ip_funcs,
1777};