Loading...
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_1_0_offset.h"
34#include "gc/gc_10_1_0_sh_mask.h"
35#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37
38#include "soc15_common.h"
39#include "soc15.h"
40#include "navi10_sdma_pkt_open.h"
41#include "nbio_v2_3.h"
42#include "sdma_common.h"
43#include "sdma_v5_0.h"
44
45MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
46MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
47
48MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
49MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
50
51MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
52MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
53
54MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
55MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
56
57#define SDMA1_REG_OFFSET 0x600
58#define SDMA0_HYP_DEC_REG_START 0x5880
59#define SDMA0_HYP_DEC_REG_END 0x5893
60#define SDMA1_HYP_DEC_REG_OFFSET 0x20
61
62static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
63 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
64 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
65 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
66 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
67 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
68 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
69 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
70 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
71 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
72 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
73 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
74 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
75 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
76 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
77 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
78 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
79 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
80 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
81 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
82 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
83 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
84 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
85 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
86 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
87 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
88 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
89 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
90 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
91 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
92 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
93 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
94 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
95 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
96 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
97 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
98 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
99 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
100 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
101 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
102 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
103 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
104 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
105 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
106 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
107 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
108 SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
109};
110
111static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
112static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
113static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
114static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
115
116static const struct soc15_reg_golden golden_settings_sdma_5[] = {
117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
120 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
121 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
122 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
123 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
124 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
125 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
126 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
127 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
128 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
129 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
130 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
131 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
132 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
134 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
141};
142
143static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
144 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
145 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
146 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
147 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
148 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
149 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
150 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
151 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
152 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
153 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
154 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
155 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
156 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
157 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
158 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
159 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
160 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
161 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
162 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
163 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
164};
165
166static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
167 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
168 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
169};
170
171static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
172 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
173 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
174};
175
176static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
177 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
178 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
179 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
180 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
181 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
182 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
183};
184
185static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = {
186 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
187 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
188 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
189 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
190 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
191 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
192 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
193 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
194 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
195 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
196 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
197 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
198 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
199 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00),
200 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
201 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
202 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
203 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
204 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
205 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
206 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
207 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
208 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
209 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
210 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
211 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
212 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
213 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00)
214};
215
216static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
217{
218 u32 base;
219
220 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
221 internal_offset <= SDMA0_HYP_DEC_REG_END) {
222 base = adev->reg_offset[GC_HWIP][0][1];
223 if (instance == 1)
224 internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
225 } else {
226 base = adev->reg_offset[GC_HWIP][0][0];
227 if (instance == 1)
228 internal_offset += SDMA1_REG_OFFSET;
229 }
230
231 return base + internal_offset;
232}
233
234static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
235{
236 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
237 case IP_VERSION(5, 0, 0):
238 soc15_program_register_sequence(adev,
239 golden_settings_sdma_5,
240 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
241 soc15_program_register_sequence(adev,
242 golden_settings_sdma_nv10,
243 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
244 break;
245 case IP_VERSION(5, 0, 2):
246 soc15_program_register_sequence(adev,
247 golden_settings_sdma_5,
248 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
249 soc15_program_register_sequence(adev,
250 golden_settings_sdma_nv14,
251 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
252 break;
253 case IP_VERSION(5, 0, 5):
254 if (amdgpu_sriov_vf(adev))
255 soc15_program_register_sequence(adev,
256 golden_settings_sdma_5_sriov,
257 (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
258 else
259 soc15_program_register_sequence(adev,
260 golden_settings_sdma_5,
261 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
262 soc15_program_register_sequence(adev,
263 golden_settings_sdma_nv12,
264 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
265 break;
266 case IP_VERSION(5, 0, 1):
267 soc15_program_register_sequence(adev,
268 golden_settings_sdma_cyan_skillfish,
269 (const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
270 break;
271 default:
272 break;
273 }
274}
275
276/**
277 * sdma_v5_0_init_microcode - load ucode images from disk
278 *
279 * @adev: amdgpu_device pointer
280 *
281 * Use the firmware interface to load the ucode images into
282 * the driver (not loaded into hw).
283 * Returns 0 on success, error on failure.
284 */
285
286// emulation only, won't work on real chip
287// navi10 real chip need to use PSP to load firmware
288static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
289{
290 int ret, i;
291
292 for (i = 0; i < adev->sdma.num_instances; i++) {
293 ret = amdgpu_sdma_init_microcode(adev, i, false);
294 if (ret)
295 return ret;
296 }
297
298 return ret;
299}
300
301static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring,
302 uint64_t addr)
303{
304 unsigned ret;
305
306 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
307 amdgpu_ring_write(ring, lower_32_bits(addr));
308 amdgpu_ring_write(ring, upper_32_bits(addr));
309 amdgpu_ring_write(ring, 1);
310 /* this is the offset we need patch later */
311 ret = ring->wptr & ring->buf_mask;
312 /* insert dummy here and patch it later */
313 amdgpu_ring_write(ring, 0);
314
315 return ret;
316}
317
318/**
319 * sdma_v5_0_ring_get_rptr - get the current read pointer
320 *
321 * @ring: amdgpu ring pointer
322 *
323 * Get the current rptr from the hardware (NAVI10+).
324 */
325static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
326{
327 u64 *rptr;
328
329 /* XXX check if swapping is necessary on BE */
330 rptr = (u64 *)ring->rptr_cpu_addr;
331
332 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
333 return ((*rptr) >> 2);
334}
335
336/**
337 * sdma_v5_0_ring_get_wptr - get the current write pointer
338 *
339 * @ring: amdgpu ring pointer
340 *
341 * Get the current wptr from the hardware (NAVI10+).
342 */
343static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
344{
345 struct amdgpu_device *adev = ring->adev;
346 u64 wptr;
347
348 if (ring->use_doorbell) {
349 /* XXX check if swapping is necessary on BE */
350 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
351 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
352 } else {
353 wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
354 wptr = wptr << 32;
355 wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
356 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
357 }
358
359 return wptr >> 2;
360}
361
362/**
363 * sdma_v5_0_ring_set_wptr - commit the write pointer
364 *
365 * @ring: amdgpu ring pointer
366 *
367 * Write the wptr back to the hardware (NAVI10+).
368 */
369static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
370{
371 struct amdgpu_device *adev = ring->adev;
372 uint32_t *wptr_saved;
373 uint32_t *is_queue_unmap;
374 uint64_t aggregated_db_index;
375 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
376
377 DRM_DEBUG("Setting write pointer\n");
378 if (ring->is_mes_queue) {
379 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
380 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
381 sizeof(uint32_t));
382 aggregated_db_index =
383 amdgpu_mes_get_aggregated_doorbell_index(adev,
384 AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
385
386 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
387 ring->wptr << 2);
388 *wptr_saved = ring->wptr << 2;
389 if (*is_queue_unmap) {
390 WDOORBELL64(aggregated_db_index, ring->wptr << 2);
391 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
392 ring->doorbell_index, ring->wptr << 2);
393 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
394 } else {
395 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
396 ring->doorbell_index, ring->wptr << 2);
397 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
398
399 if (*is_queue_unmap)
400 WDOORBELL64(aggregated_db_index,
401 ring->wptr << 2);
402 }
403 } else {
404 if (ring->use_doorbell) {
405 DRM_DEBUG("Using doorbell -- "
406 "wptr_offs == 0x%08x "
407 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
408 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
409 ring->wptr_offs,
410 lower_32_bits(ring->wptr << 2),
411 upper_32_bits(ring->wptr << 2));
412 /* XXX check if swapping is necessary on BE */
413 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
414 ring->wptr << 2);
415 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
416 ring->doorbell_index, ring->wptr << 2);
417 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
418 } else {
419 DRM_DEBUG("Not using doorbell -- "
420 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
421 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
422 ring->me,
423 lower_32_bits(ring->wptr << 2),
424 ring->me,
425 upper_32_bits(ring->wptr << 2));
426 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
427 ring->me, mmSDMA0_GFX_RB_WPTR),
428 lower_32_bits(ring->wptr << 2));
429 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
430 ring->me, mmSDMA0_GFX_RB_WPTR_HI),
431 upper_32_bits(ring->wptr << 2));
432 }
433 }
434}
435
436static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
437{
438 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
439 int i;
440
441 for (i = 0; i < count; i++)
442 if (sdma && sdma->burst_nop && (i == 0))
443 amdgpu_ring_write(ring, ring->funcs->nop |
444 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
445 else
446 amdgpu_ring_write(ring, ring->funcs->nop);
447}
448
449/**
450 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
451 *
452 * @ring: amdgpu ring pointer
453 * @job: job to retrieve vmid from
454 * @ib: IB object to schedule
455 * @flags: unused
456 *
457 * Schedule an IB in the DMA ring (NAVI10).
458 */
459static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
460 struct amdgpu_job *job,
461 struct amdgpu_ib *ib,
462 uint32_t flags)
463{
464 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
465 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
466
467 /* An IB packet must end on a 8 DW boundary--the next dword
468 * must be on a 8-dword boundary. Our IB packet below is 6
469 * dwords long, thus add x number of NOPs, such that, in
470 * modular arithmetic,
471 * wptr + 6 + x = 8k, k >= 0, which in C is,
472 * (wptr + 6 + x) % 8 = 0.
473 * The expression below, is a solution of x.
474 */
475 sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
476
477 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
478 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
479 /* base must be 32 byte aligned */
480 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
481 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
482 amdgpu_ring_write(ring, ib->length_dw);
483 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
484 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
485}
486
487/**
488 * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
489 *
490 * @ring: amdgpu ring pointer
491 *
492 * flush the IB by graphics cache rinse.
493 */
494static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
495{
496 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
497 SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
498 SDMA_GCR_GLI_INV(1);
499
500 /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
501 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
502 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
503 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
504 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
505 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
506 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
507 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
508 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
509}
510
511/**
512 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
513 *
514 * @ring: amdgpu ring pointer
515 *
516 * Emit an hdp flush packet on the requested DMA ring.
517 */
518static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
519{
520 struct amdgpu_device *adev = ring->adev;
521 u32 ref_and_mask = 0;
522 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
523
524 if (ring->me == 0)
525 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
526 else
527 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
528
529 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
530 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
531 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
532 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
533 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
534 amdgpu_ring_write(ring, ref_and_mask); /* reference */
535 amdgpu_ring_write(ring, ref_and_mask); /* mask */
536 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
537 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
538}
539
540/**
541 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
542 *
543 * @ring: amdgpu ring pointer
544 * @addr: address
545 * @seq: sequence number
546 * @flags: fence related flags
547 *
548 * Add a DMA fence packet to the ring to write
549 * the fence seq number and DMA trap packet to generate
550 * an interrupt if needed (NAVI10).
551 */
552static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
553 unsigned flags)
554{
555 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
556 /* write the fence */
557 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
558 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
559 /* zero in first two bits */
560 BUG_ON(addr & 0x3);
561 amdgpu_ring_write(ring, lower_32_bits(addr));
562 amdgpu_ring_write(ring, upper_32_bits(addr));
563 amdgpu_ring_write(ring, lower_32_bits(seq));
564
565 /* optionally write high bits as well */
566 if (write64bit) {
567 addr += 4;
568 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
569 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
570 /* zero in first two bits */
571 BUG_ON(addr & 0x3);
572 amdgpu_ring_write(ring, lower_32_bits(addr));
573 amdgpu_ring_write(ring, upper_32_bits(addr));
574 amdgpu_ring_write(ring, upper_32_bits(seq));
575 }
576
577 if (flags & AMDGPU_FENCE_FLAG_INT) {
578 uint32_t ctx = ring->is_mes_queue ?
579 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
580 /* generate an interrupt */
581 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
582 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
583 }
584}
585
586
587/**
588 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
589 *
590 * @adev: amdgpu_device pointer
591 *
592 * Stop the gfx async dma ring buffers (NAVI10).
593 */
594static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
595{
596 u32 rb_cntl, ib_cntl;
597 int i;
598
599 for (i = 0; i < adev->sdma.num_instances; i++) {
600 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
601 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
602 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
603 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
604 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
605 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
606 }
607}
608
609/**
610 * sdma_v5_0_rlc_stop - stop the compute async dma engines
611 *
612 * @adev: amdgpu_device pointer
613 *
614 * Stop the compute async dma queues (NAVI10).
615 */
616static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
617{
618 /* XXX todo */
619}
620
621/**
622 * sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch
623 *
624 * @adev: amdgpu_device pointer
625 * @enable: enable/disable the DMA MEs context switch.
626 *
627 * Halt or unhalt the async dma engines context switch (NAVI10).
628 */
629static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
630{
631 u32 f32_cntl = 0, phase_quantum = 0;
632 int i;
633
634 if (amdgpu_sdma_phase_quantum) {
635 unsigned value = amdgpu_sdma_phase_quantum;
636 unsigned unit = 0;
637
638 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
639 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
640 value = (value + 1) >> 1;
641 unit++;
642 }
643 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
644 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
645 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
646 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
647 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
648 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
649 WARN_ONCE(1,
650 "clamping sdma_phase_quantum to %uK clock cycles\n",
651 value << unit);
652 }
653 phase_quantum =
654 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
655 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
656 }
657
658 for (i = 0; i < adev->sdma.num_instances; i++) {
659 if (!amdgpu_sriov_vf(adev)) {
660 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
661 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
662 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
663 }
664
665 if (enable && amdgpu_sdma_phase_quantum) {
666 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
667 phase_quantum);
668 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
669 phase_quantum);
670 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
671 phase_quantum);
672 }
673 if (!amdgpu_sriov_vf(adev))
674 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
675 }
676
677}
678
679/**
680 * sdma_v5_0_enable - stop the async dma engines
681 *
682 * @adev: amdgpu_device pointer
683 * @enable: enable/disable the DMA MEs.
684 *
685 * Halt or unhalt the async dma engines (NAVI10).
686 */
687static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
688{
689 u32 f32_cntl;
690 int i;
691
692 if (!enable) {
693 sdma_v5_0_gfx_stop(adev);
694 sdma_v5_0_rlc_stop(adev);
695 }
696
697 if (amdgpu_sriov_vf(adev))
698 return;
699
700 for (i = 0; i < adev->sdma.num_instances; i++) {
701 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
702 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
703 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
704 }
705}
706
707/**
708 * sdma_v5_0_gfx_resume_instance - start/restart a certain sdma engine
709 *
710 * @adev: amdgpu_device pointer
711 * @i: instance
712 * @restore: used to restore wptr when restart
713 *
714 * Set up the gfx DMA ring buffers and enable them. On restart, we will restore wptr and rptr.
715 * Return 0 for success.
716 */
717static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
718{
719 struct amdgpu_ring *ring;
720 u32 rb_cntl, ib_cntl;
721 u32 rb_bufsz;
722 u32 doorbell;
723 u32 doorbell_offset;
724 u32 temp;
725 u32 wptr_poll_cntl;
726 u64 wptr_gpu_addr;
727
728 ring = &adev->sdma.instance[i].ring;
729
730 if (!amdgpu_sriov_vf(adev))
731 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
732
733 /* Set ring buffer size in dwords */
734 rb_bufsz = order_base_2(ring->ring_size / 4);
735 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
736 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
737#ifdef __BIG_ENDIAN
738 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
739 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
740 RPTR_WRITEBACK_SWAP_ENABLE, 1);
741#endif
742 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
743
744 /* Initialize the ring buffer's read and write pointers */
745 if (restore) {
746 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
747 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
748 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
749 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
750 } else {
751 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
752 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
753 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
754 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
755 }
756 /* setup the wptr shadow polling */
757 wptr_gpu_addr = ring->wptr_gpu_addr;
758 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
759 lower_32_bits(wptr_gpu_addr));
760 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
761 upper_32_bits(wptr_gpu_addr));
762 wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
763 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
764 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
765 SDMA0_GFX_RB_WPTR_POLL_CNTL,
766 F32_POLL_ENABLE, 1);
767 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
768 wptr_poll_cntl);
769
770 /* set the wb address whether it's enabled or not */
771 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
772 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
773 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
774 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
775
776 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
777
778 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
779 ring->gpu_addr >> 8);
780 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
781 ring->gpu_addr >> 40);
782
783 if (!restore)
784 ring->wptr = 0;
785
786 /* before programing wptr to a less value, need set minor_ptr_update first */
787 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
788
789 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
790 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
791 lower_32_bits(ring->wptr << 2));
792 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
793 upper_32_bits(ring->wptr << 2));
794 }
795
796 doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
797 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
798 mmSDMA0_GFX_DOORBELL_OFFSET));
799
800 if (ring->use_doorbell) {
801 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
802 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
803 OFFSET, ring->doorbell_index);
804 } else {
805 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
806 }
807 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
808 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
809 doorbell_offset);
810
811 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
812 ring->doorbell_index, 20);
813
814 if (amdgpu_sriov_vf(adev))
815 sdma_v5_0_ring_set_wptr(ring);
816
817 /* set minor_ptr_update to 0 after wptr programed */
818 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
819
820 if (!amdgpu_sriov_vf(adev)) {
821 /* set utc l1 enable flag always to 1 */
822 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
823 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
824
825 /* enable MCBP */
826 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
827 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
828
829 /* Set up RESP_MODE to non-copy addresses */
830 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
831 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
832 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
833 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
834
835 /* program default cache read and write policy */
836 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
837 /* clean read policy and write policy bits */
838 temp &= 0xFF0FFF;
839 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
840 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
841 }
842
843 if (!amdgpu_sriov_vf(adev)) {
844 /* unhalt engine */
845 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
846 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
847 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
848 }
849
850 /* enable DMA RB */
851 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
852 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
853
854 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
855 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
856#ifdef __BIG_ENDIAN
857 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
858#endif
859 /* enable DMA IBs */
860 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
861
862 if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
863 sdma_v5_0_ctx_switch_enable(adev, true);
864 sdma_v5_0_enable(adev, true);
865 }
866
867 return amdgpu_ring_test_helper(ring);
868}
869
870/**
871 * sdma_v5_0_gfx_resume - setup and start the async dma engines
872 *
873 * @adev: amdgpu_device pointer
874 *
875 * Set up the gfx DMA ring buffers and enable them (NAVI10).
876 * Returns 0 for success, error for failure.
877 */
878static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
879{
880 int i, r;
881
882 for (i = 0; i < adev->sdma.num_instances; i++) {
883 r = sdma_v5_0_gfx_resume_instance(adev, i, false);
884 if (r)
885 return r;
886 }
887
888 return 0;
889}
890
891/**
892 * sdma_v5_0_rlc_resume - setup and start the async dma engines
893 *
894 * @adev: amdgpu_device pointer
895 *
896 * Set up the compute DMA queues and enable them (NAVI10).
897 * Returns 0 for success, error for failure.
898 */
899static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
900{
901 return 0;
902}
903
904/**
905 * sdma_v5_0_load_microcode - load the sDMA ME ucode
906 *
907 * @adev: amdgpu_device pointer
908 *
909 * Loads the sDMA0/1 ucode.
910 * Returns 0 for success, -EINVAL if the ucode is not available.
911 */
912static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
913{
914 const struct sdma_firmware_header_v1_0 *hdr;
915 const __le32 *fw_data;
916 u32 fw_size;
917 int i, j;
918
919 /* halt the MEs */
920 sdma_v5_0_enable(adev, false);
921
922 for (i = 0; i < adev->sdma.num_instances; i++) {
923 if (!adev->sdma.instance[i].fw)
924 return -EINVAL;
925
926 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
927 amdgpu_ucode_print_sdma_hdr(&hdr->header);
928 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
929
930 fw_data = (const __le32 *)
931 (adev->sdma.instance[i].fw->data +
932 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
933
934 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
935
936 for (j = 0; j < fw_size; j++) {
937 if (amdgpu_emu_mode == 1 && j % 500 == 0)
938 msleep(1);
939 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
940 }
941
942 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
943 }
944
945 return 0;
946}
947
948/**
949 * sdma_v5_0_start - setup and start the async dma engines
950 *
951 * @adev: amdgpu_device pointer
952 *
953 * Set up the DMA engines and enable them (NAVI10).
954 * Returns 0 for success, error for failure.
955 */
956static int sdma_v5_0_start(struct amdgpu_device *adev)
957{
958 int r = 0;
959
960 if (amdgpu_sriov_vf(adev)) {
961 sdma_v5_0_ctx_switch_enable(adev, false);
962 sdma_v5_0_enable(adev, false);
963
964 /* set RB registers */
965 r = sdma_v5_0_gfx_resume(adev);
966 return r;
967 }
968
969 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
970 r = sdma_v5_0_load_microcode(adev);
971 if (r)
972 return r;
973 }
974
975 /* unhalt the MEs */
976 sdma_v5_0_enable(adev, true);
977 /* enable sdma ring preemption */
978 sdma_v5_0_ctx_switch_enable(adev, true);
979
980 /* start the gfx rings and rlc compute queues */
981 r = sdma_v5_0_gfx_resume(adev);
982 if (r)
983 return r;
984 r = sdma_v5_0_rlc_resume(adev);
985
986 return r;
987}
988
989static int sdma_v5_0_mqd_init(struct amdgpu_device *adev, void *mqd,
990 struct amdgpu_mqd_prop *prop)
991{
992 struct v10_sdma_mqd *m = mqd;
993 uint64_t wb_gpu_addr;
994
995 m->sdmax_rlcx_rb_cntl =
996 order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
997 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
998 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
999 1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
1000
1001 m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
1002 m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
1003
1004 m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
1005 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
1006
1007 wb_gpu_addr = prop->wptr_gpu_addr;
1008 m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
1009 m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
1010
1011 wb_gpu_addr = prop->rptr_gpu_addr;
1012 m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
1013 m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
1014
1015 m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
1016 mmSDMA0_GFX_IB_CNTL));
1017
1018 m->sdmax_rlcx_doorbell_offset =
1019 prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
1020
1021 m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
1022
1023 return 0;
1024}
1025
1026static void sdma_v5_0_set_mqd_funcs(struct amdgpu_device *adev)
1027{
1028 adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
1029 adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_0_mqd_init;
1030}
1031
1032/**
1033 * sdma_v5_0_ring_test_ring - simple async dma engine test
1034 *
1035 * @ring: amdgpu_ring structure holding ring information
1036 *
1037 * Test the DMA engine by writing using it to write an
1038 * value to memory. (NAVI10).
1039 * Returns 0 for success, error for failure.
1040 */
1041static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
1042{
1043 struct amdgpu_device *adev = ring->adev;
1044 unsigned i;
1045 unsigned index;
1046 int r;
1047 u32 tmp;
1048 u64 gpu_addr;
1049 volatile uint32_t *cpu_ptr = NULL;
1050
1051 tmp = 0xCAFEDEAD;
1052
1053 if (ring->is_mes_queue) {
1054 uint32_t offset = 0;
1055 offset = amdgpu_mes_ctx_get_offs(ring,
1056 AMDGPU_MES_CTX_PADDING_OFFS);
1057 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1058 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
1059 *cpu_ptr = tmp;
1060 } else {
1061 r = amdgpu_device_wb_get(adev, &index);
1062 if (r) {
1063 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
1064 return r;
1065 }
1066
1067 gpu_addr = adev->wb.gpu_addr + (index * 4);
1068 adev->wb.wb[index] = cpu_to_le32(tmp);
1069 }
1070
1071 r = amdgpu_ring_alloc(ring, 20);
1072 if (r) {
1073 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
1074 if (!ring->is_mes_queue)
1075 amdgpu_device_wb_free(adev, index);
1076 return r;
1077 }
1078
1079 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1080 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1081 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1082 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1083 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1084 amdgpu_ring_write(ring, 0xDEADBEEF);
1085 amdgpu_ring_commit(ring);
1086
1087 for (i = 0; i < adev->usec_timeout; i++) {
1088 if (ring->is_mes_queue)
1089 tmp = le32_to_cpu(*cpu_ptr);
1090 else
1091 tmp = le32_to_cpu(adev->wb.wb[index]);
1092 if (tmp == 0xDEADBEEF)
1093 break;
1094 if (amdgpu_emu_mode == 1)
1095 msleep(1);
1096 else
1097 udelay(1);
1098 }
1099
1100 if (i >= adev->usec_timeout)
1101 r = -ETIMEDOUT;
1102
1103 if (!ring->is_mes_queue)
1104 amdgpu_device_wb_free(adev, index);
1105
1106 return r;
1107}
1108
1109/**
1110 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
1111 *
1112 * @ring: amdgpu_ring structure holding ring information
1113 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1114 *
1115 * Test a simple IB in the DMA ring (NAVI10).
1116 * Returns 0 on success, error on failure.
1117 */
1118static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1119{
1120 struct amdgpu_device *adev = ring->adev;
1121 struct amdgpu_ib ib;
1122 struct dma_fence *f = NULL;
1123 unsigned index;
1124 long r;
1125 u32 tmp = 0;
1126 u64 gpu_addr;
1127 volatile uint32_t *cpu_ptr = NULL;
1128
1129 tmp = 0xCAFEDEAD;
1130 memset(&ib, 0, sizeof(ib));
1131
1132 if (ring->is_mes_queue) {
1133 uint32_t offset = 0;
1134 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
1135 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1136 ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
1137
1138 offset = amdgpu_mes_ctx_get_offs(ring,
1139 AMDGPU_MES_CTX_PADDING_OFFS);
1140 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1141 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
1142 *cpu_ptr = tmp;
1143 } else {
1144 r = amdgpu_device_wb_get(adev, &index);
1145 if (r) {
1146 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
1147 return r;
1148 }
1149
1150 gpu_addr = adev->wb.gpu_addr + (index * 4);
1151 adev->wb.wb[index] = cpu_to_le32(tmp);
1152
1153 r = amdgpu_ib_get(adev, NULL, 256,
1154 AMDGPU_IB_POOL_DIRECT, &ib);
1155 if (r) {
1156 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
1157 goto err0;
1158 }
1159 }
1160
1161 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1162 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1163 ib.ptr[1] = lower_32_bits(gpu_addr);
1164 ib.ptr[2] = upper_32_bits(gpu_addr);
1165 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1166 ib.ptr[4] = 0xDEADBEEF;
1167 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1168 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1169 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1170 ib.length_dw = 8;
1171
1172 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1173 if (r)
1174 goto err1;
1175
1176 r = dma_fence_wait_timeout(f, false, timeout);
1177 if (r == 0) {
1178 DRM_ERROR("amdgpu: IB test timed out\n");
1179 r = -ETIMEDOUT;
1180 goto err1;
1181 } else if (r < 0) {
1182 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
1183 goto err1;
1184 }
1185
1186 if (ring->is_mes_queue)
1187 tmp = le32_to_cpu(*cpu_ptr);
1188 else
1189 tmp = le32_to_cpu(adev->wb.wb[index]);
1190
1191 if (tmp == 0xDEADBEEF)
1192 r = 0;
1193 else
1194 r = -EINVAL;
1195
1196err1:
1197 amdgpu_ib_free(adev, &ib, NULL);
1198 dma_fence_put(f);
1199err0:
1200 if (!ring->is_mes_queue)
1201 amdgpu_device_wb_free(adev, index);
1202 return r;
1203}
1204
1205
1206/**
1207 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
1208 *
1209 * @ib: indirect buffer to fill with commands
1210 * @pe: addr of the page entry
1211 * @src: src addr to copy from
1212 * @count: number of page entries to update
1213 *
1214 * Update PTEs by copying them from the GART using sDMA (NAVI10).
1215 */
1216static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
1217 uint64_t pe, uint64_t src,
1218 unsigned count)
1219{
1220 unsigned bytes = count * 8;
1221
1222 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1223 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1224 ib->ptr[ib->length_dw++] = bytes - 1;
1225 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1226 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1227 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1228 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1229 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1230
1231}
1232
1233/**
1234 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
1235 *
1236 * @ib: indirect buffer to fill with commands
1237 * @pe: addr of the page entry
1238 * @value: dst addr to write into pe
1239 * @count: number of page entries to update
1240 * @incr: increase next addr by incr bytes
1241 *
1242 * Update PTEs by writing them manually using sDMA (NAVI10).
1243 */
1244static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1245 uint64_t value, unsigned count,
1246 uint32_t incr)
1247{
1248 unsigned ndw = count * 2;
1249
1250 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1251 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1252 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1253 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1254 ib->ptr[ib->length_dw++] = ndw - 1;
1255 for (; ndw > 0; ndw -= 2) {
1256 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1257 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1258 value += incr;
1259 }
1260}
1261
1262/**
1263 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1264 *
1265 * @ib: indirect buffer to fill with commands
1266 * @pe: addr of the page entry
1267 * @addr: dst addr to write into pe
1268 * @count: number of page entries to update
1269 * @incr: increase next addr by incr bytes
1270 * @flags: access flags
1271 *
1272 * Update the page tables using sDMA (NAVI10).
1273 */
1274static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1275 uint64_t pe,
1276 uint64_t addr, unsigned count,
1277 uint32_t incr, uint64_t flags)
1278{
1279 /* for physically contiguous pages (vram) */
1280 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1281 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1282 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1283 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1284 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1285 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1286 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1287 ib->ptr[ib->length_dw++] = incr; /* increment size */
1288 ib->ptr[ib->length_dw++] = 0;
1289 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1290}
1291
1292/**
1293 * sdma_v5_0_ring_pad_ib - pad the IB
1294 * @ring: amdgpu_ring structure holding ring information
1295 * @ib: indirect buffer to fill with padding
1296 *
1297 * Pad the IB with NOPs to a boundary multiple of 8.
1298 */
1299static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1300{
1301 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1302 u32 pad_count;
1303 int i;
1304
1305 pad_count = (-ib->length_dw) & 0x7;
1306 for (i = 0; i < pad_count; i++)
1307 if (sdma && sdma->burst_nop && (i == 0))
1308 ib->ptr[ib->length_dw++] =
1309 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1310 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1311 else
1312 ib->ptr[ib->length_dw++] =
1313 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1314}
1315
1316
1317/**
1318 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1319 *
1320 * @ring: amdgpu_ring pointer
1321 *
1322 * Make sure all previous operations are completed (CIK).
1323 */
1324static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1325{
1326 uint32_t seq = ring->fence_drv.sync_seq;
1327 uint64_t addr = ring->fence_drv.gpu_addr;
1328
1329 /* wait for idle */
1330 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1331 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1332 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1333 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1334 amdgpu_ring_write(ring, addr & 0xfffffffc);
1335 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1336 amdgpu_ring_write(ring, seq); /* reference */
1337 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1338 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1339 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1340}
1341
1342
1343/**
1344 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1345 *
1346 * @ring: amdgpu_ring pointer
1347 * @vmid: vmid number to use
1348 * @pd_addr: address
1349 *
1350 * Update the page table base and flush the VM TLB
1351 * using sDMA (NAVI10).
1352 */
1353static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1354 unsigned vmid, uint64_t pd_addr)
1355{
1356 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1357}
1358
1359static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1360 uint32_t reg, uint32_t val)
1361{
1362 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1363 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1364 amdgpu_ring_write(ring, reg);
1365 amdgpu_ring_write(ring, val);
1366}
1367
1368static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1369 uint32_t val, uint32_t mask)
1370{
1371 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1372 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1373 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1374 amdgpu_ring_write(ring, reg << 2);
1375 amdgpu_ring_write(ring, 0);
1376 amdgpu_ring_write(ring, val); /* reference */
1377 amdgpu_ring_write(ring, mask); /* mask */
1378 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1379 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1380}
1381
1382static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1383 uint32_t reg0, uint32_t reg1,
1384 uint32_t ref, uint32_t mask)
1385{
1386 amdgpu_ring_emit_wreg(ring, reg0, ref);
1387 /* wait for a cycle to reset vm_inv_eng*_ack */
1388 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1389 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1390}
1391
1392static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
1393{
1394 struct amdgpu_device *adev = ip_block->adev;
1395 int r;
1396
1397 r = sdma_v5_0_init_microcode(adev);
1398 if (r)
1399 return r;
1400
1401 sdma_v5_0_set_ring_funcs(adev);
1402 sdma_v5_0_set_buffer_funcs(adev);
1403 sdma_v5_0_set_vm_pte_funcs(adev);
1404 sdma_v5_0_set_irq_funcs(adev);
1405 sdma_v5_0_set_mqd_funcs(adev);
1406
1407 return 0;
1408}
1409
1410
1411static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
1412{
1413 struct amdgpu_ring *ring;
1414 int r, i;
1415 struct amdgpu_device *adev = ip_block->adev;
1416 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
1417 uint32_t *ptr;
1418
1419 /* SDMA trap event */
1420 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1421 SDMA0_5_0__SRCID__SDMA_TRAP,
1422 &adev->sdma.trap_irq);
1423 if (r)
1424 return r;
1425
1426 /* SDMA trap event */
1427 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1428 SDMA1_5_0__SRCID__SDMA_TRAP,
1429 &adev->sdma.trap_irq);
1430 if (r)
1431 return r;
1432
1433 for (i = 0; i < adev->sdma.num_instances; i++) {
1434 ring = &adev->sdma.instance[i].ring;
1435 ring->ring_obj = NULL;
1436 ring->use_doorbell = true;
1437
1438 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1439 ring->use_doorbell?"true":"false");
1440
1441 ring->doorbell_index = (i == 0) ?
1442 (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
1443 : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
1444
1445 ring->vm_hub = AMDGPU_GFXHUB(0);
1446 sprintf(ring->name, "sdma%d", i);
1447 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1448 (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
1449 AMDGPU_SDMA_IRQ_INSTANCE1,
1450 AMDGPU_RING_PRIO_DEFAULT, NULL);
1451 if (r)
1452 return r;
1453 }
1454
1455 adev->sdma.supported_reset =
1456 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
1457 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1458 case IP_VERSION(5, 0, 0):
1459 case IP_VERSION(5, 0, 2):
1460 case IP_VERSION(5, 0, 5):
1461 if (adev->sdma.instance[0].fw_version >= 35)
1462 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1463 break;
1464 default:
1465 break;
1466 }
1467
1468 /* Allocate memory for SDMA IP Dump buffer */
1469 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
1470 if (ptr)
1471 adev->sdma.ip_dump = ptr;
1472 else
1473 DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
1474
1475 r = amdgpu_sdma_sysfs_reset_mask_init(adev);
1476 if (r)
1477 return r;
1478
1479 return r;
1480}
1481
1482static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
1483{
1484 struct amdgpu_device *adev = ip_block->adev;
1485 int i;
1486
1487 for (i = 0; i < adev->sdma.num_instances; i++)
1488 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1489
1490 amdgpu_sdma_sysfs_reset_mask_fini(adev);
1491 amdgpu_sdma_destroy_inst_ctx(adev, false);
1492
1493 kfree(adev->sdma.ip_dump);
1494
1495 return 0;
1496}
1497
1498static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
1499{
1500 int r;
1501 struct amdgpu_device *adev = ip_block->adev;
1502
1503 sdma_v5_0_init_golden_registers(adev);
1504
1505 r = sdma_v5_0_start(adev);
1506
1507 return r;
1508}
1509
1510static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
1511{
1512 struct amdgpu_device *adev = ip_block->adev;
1513
1514 if (amdgpu_sriov_vf(adev))
1515 return 0;
1516
1517 sdma_v5_0_ctx_switch_enable(adev, false);
1518 sdma_v5_0_enable(adev, false);
1519
1520 return 0;
1521}
1522
1523static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block)
1524{
1525 return sdma_v5_0_hw_fini(ip_block);
1526}
1527
1528static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
1529{
1530 return sdma_v5_0_hw_init(ip_block);
1531}
1532
1533static bool sdma_v5_0_is_idle(void *handle)
1534{
1535 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1536 u32 i;
1537
1538 for (i = 0; i < adev->sdma.num_instances; i++) {
1539 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1540
1541 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1542 return false;
1543 }
1544
1545 return true;
1546}
1547
1548static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1549{
1550 unsigned i;
1551 u32 sdma0, sdma1;
1552 struct amdgpu_device *adev = ip_block->adev;
1553
1554 for (i = 0; i < adev->usec_timeout; i++) {
1555 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1556 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1557
1558 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1559 return 0;
1560 udelay(1);
1561 }
1562 return -ETIMEDOUT;
1563}
1564
1565static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
1566{
1567 /* todo */
1568
1569 return 0;
1570}
1571
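/**
 * sdma_v5_0_reset_queue - reset an SDMA gfx queue
 *
 * @ring: amdgpu ring pointer of the SDMA gfx ring to reset
 * @vmid: unused
 *
 * Per-queue reset sequence: stop ring buffer and IB execution, freeze
 * and halt the engine, pulse the matching GRBM soft reset bit, then
 * unfreeze and resume the gfx ring for this instance.
 * Returns 0 on success, negative error code on failure.
 */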
1572static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
1573{
1574 struct amdgpu_device *adev = ring->adev;
1575 int i, j, r;
1576 u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
1577
1578 if (amdgpu_sriov_vf(adev))
1579 return -EINVAL;
1580
1581 for (i = 0; i < adev->sdma.num_instances; i++) {
1582 if (ring == &adev->sdma.instance[i].ring)
1583 break;
1584 }
1585
1586 if (i == adev->sdma.num_instances) {
1587 DRM_ERROR("sdma instance not found\n");
1588 return -EINVAL;
1589 }
1590
1591 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
1592
1593 /* stop queue */
1594 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
1595 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
1596 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
1597
1598 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
1599 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
1600 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
1601
1602	/* engine stop: set SDMAx_FREEZE freeze bit to 1 and SDMAx_F32_CNTL.HALT to 1 */
1603 freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1604 freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
1605 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
1606
1607 for (j = 0; j < adev->usec_timeout; j++) {
1608 freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1609 if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
1610 break;
1611 udelay(1);
1612 }
1613
1614	/* if the FROZEN bit was not seen, check that the SDMA copy engine is fully idle */
1615 if (j == adev->usec_timeout) {
1616 stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
1617 if ((stat1_reg & 0x3FF) != 0x3FF) {
1618 DRM_ERROR("cannot soft reset as sdma not idle\n");
1619 r = -ETIMEDOUT;
1620 goto err0;
1621 }
1622 }
1623
1624 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
1625 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
1626 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
1627
1628 cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
1629 cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
1630 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
1631
1632	/* soft reset: set SDMA_GFX_PREEMPT.IB_PREEMPT = 0, then pulse GRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 */
1633 preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
1634 preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
1635 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
1636
1637 soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
1638 soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
1639
1640 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
1641
1642 udelay(50);
1643
1644 soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
1645 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
1646
1647	/* unfreeze */
1648 freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1649 freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
1650 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
1651
1652 r = sdma_v5_0_gfx_resume_instance(adev, i, true);
1653
1654err0:
1655 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
1656 return r;
1657}
1658
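/**
 * sdma_v5_0_ring_preempt_ib - preempt the IB currently running on the ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit a trailing fence, assert IB preemption via the GFX_PREEMPT
 * register and poll until the trailing fence signals, then deassert
 * preemption again. Returns 0 on success, -EINVAL on timeout.
 */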
1659static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
1660{
1661 int i, r = 0;
1662 struct amdgpu_device *adev = ring->adev;
1663 u32 index = 0;
1664 u64 sdma_gfx_preempt;
1665
1666 amdgpu_sdma_get_index_from_ring(ring, &index);
1667 if (index == 0)
1668 sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
1669 else
1670 sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
1671
1672 /* assert preemption condition */
1673 amdgpu_ring_set_preempt_cond_exec(ring, false);
1674
1675 /* emit the trailing fence */
1676 ring->trail_seq += 1;
1677 amdgpu_ring_alloc(ring, 10);
1678 sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1679 ring->trail_seq, 0);
1680 amdgpu_ring_commit(ring);
1681
1682 /* assert IB preemption */
1683 WREG32(sdma_gfx_preempt, 1);
1684
1685 /* poll the trailing fence */
1686 for (i = 0; i < adev->usec_timeout; i++) {
1687 if (ring->trail_seq ==
1688 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1689 break;
1690 udelay(1);
1691 }
1692
1693 if (i >= adev->usec_timeout) {
1694 r = -EINVAL;
1695 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1696 }
1697
1698 /* deassert IB preemption */
1699 WREG32(sdma_gfx_preempt, 0);
1700
1701 /* deassert the preemption condition */
1702 amdgpu_ring_set_preempt_cond_exec(ring, true);
1703 return r;
1704}
1705
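/**
 * sdma_v5_0_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @type: SDMA instance (AMDGPU_SDMA_IRQ_INSTANCE0/1)
 * @state: interrupt state to program
 *
 * Program TRAP_ENABLE in SDMAx_CNTL for the requested instance;
 * skipped under SR-IOV.
 */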
1706static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
1707 struct amdgpu_irq_src *source,
1708 unsigned type,
1709 enum amdgpu_interrupt_state state)
1710{
1711 u32 sdma_cntl;
1712
1713 if (!amdgpu_sriov_vf(adev)) {
1714 u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
1715 sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
1716 sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
1717
1718 sdma_cntl = RREG32(reg_offset);
1719 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1720 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1721 WREG32(reg_offset, sdma_cntl);
1722 }
1723
1724 return 0;
1725}
1726
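/**
 * sdma_v5_0_process_trap_irq - process an SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV ring entry carrying the trap payload
 *
 * When MES is enabled and the payload carries a MES queue id, signal
 * the fence of that queue's ring; otherwise signal the fence of the
 * SDMA instance identified by the IH client id.
 */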
1727static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
1728 struct amdgpu_irq_src *source,
1729 struct amdgpu_iv_entry *entry)
1730{
1731 uint32_t mes_queue_id = entry->src_data[0];
1732
1733 DRM_DEBUG("IH: SDMA trap\n");
1734
1735 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
1736 struct amdgpu_mes_queue *queue;
1737
1738 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
1739
1740 spin_lock(&adev->mes.queue_id_lock);
1741 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
1742 if (queue) {
1743			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
1744 amdgpu_fence_process(queue->ring);
1745 }
1746 spin_unlock(&adev->mes.queue_id_lock);
1747 return 0;
1748 }
1749
1750 switch (entry->client_id) {
1751 case SOC15_IH_CLIENTID_SDMA0:
1752 switch (entry->ring_id) {
1753 case 0:
1754 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1755 break;
1756 case 1:
1757 /* XXX compute */
1758 break;
1759 case 2:
1760 /* XXX compute */
1761 break;
1762 case 3:
1763			/* XXX page queue */
1764 break;
1765 }
1766 break;
1767 case SOC15_IH_CLIENTID_SDMA1:
1768 switch (entry->ring_id) {
1769 case 0:
1770 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1771 break;
1772 case 1:
1773 /* XXX compute */
1774 break;
1775 case 2:
1776 /* XXX compute */
1777 break;
1778 case 3:
1779			/* XXX page queue */
1780 break;
1781 }
1782 break;
1783 }
1784 return 0;
1785}
1786
1787static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1788 struct amdgpu_irq_src *source,
1789 struct amdgpu_iv_entry *entry)
1790{
1791 return 0;
1792}
1793
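/**
 * sdma_v5_0_update_medium_grain_clock_gating - toggle SDMA MGCG
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable medium grain clock gating
 *
 * Clear the SDMAx_CLK_CTRL soft override bits so the clocks can gate
 * dynamically, or set them to keep the clocks forced on.
 */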
1794static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1795 bool enable)
1796{
1797 uint32_t data, def;
1798 int i;
1799
1800 for (i = 0; i < adev->sdma.num_instances; i++) {
1801 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1802 /* Enable sdma clock gating */
1803 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1804 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1805 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1806 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1807 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1808 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1809 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1810 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1811 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1812 if (def != data)
1813 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1814 } else {
1815 /* Disable sdma clock gating */
1816 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1817 data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1818 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1819 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1820 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1821 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1822 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1823 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1824 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1825 if (def != data)
1826 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1827 }
1828 }
1829}
1830
1831static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1832 bool enable)
1833{
1834 uint32_t data, def;
1835 int i;
1836
1837 for (i = 0; i < adev->sdma.num_instances; i++) {
1838 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1839 /* Enable sdma mem light sleep */
1840 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1841 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1842 if (def != data)
1843 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1844
1845 } else {
1846 /* Disable sdma mem light sleep */
1847 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1848 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1849 if (def != data)
1850 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1851
1852 }
1853 }
1854}
1855
1856static int sdma_v5_0_set_clockgating_state(void *handle,
1857 enum amd_clockgating_state state)
1858{
1859 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1860
1861 if (amdgpu_sriov_vf(adev))
1862 return 0;
1863
1864 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1865 case IP_VERSION(5, 0, 0):
1866 case IP_VERSION(5, 0, 2):
1867 case IP_VERSION(5, 0, 5):
1868 sdma_v5_0_update_medium_grain_clock_gating(adev,
1869 state == AMD_CG_STATE_GATE);
1870 sdma_v5_0_update_medium_grain_light_sleep(adev,
1871 state == AMD_CG_STATE_GATE);
1872 break;
1873 default:
1874 break;
1875 }
1876
1877 return 0;
1878}
1879
1880static int sdma_v5_0_set_powergating_state(void *handle,
1881 enum amd_powergating_state state)
1882{
1883 return 0;
1884}
1885
1886static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
1887{
1888 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1889 int data;
1890
1891 if (amdgpu_sriov_vf(adev))
1892 *flags = 0;
1893
1894 /* AMD_CG_SUPPORT_SDMA_MGCG */
1895 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1896 if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
1897 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1898
1899 /* AMD_CG_SUPPORT_SDMA_LS */
1900 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1901 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1902 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1903}
1904
1905static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1906{
1907 struct amdgpu_device *adev = ip_block->adev;
1908 int i, j;
1909 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
1910 uint32_t instance_offset;
1911
1912 if (!adev->sdma.ip_dump)
1913 return;
1914
1915 drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
1916 for (i = 0; i < adev->sdma.num_instances; i++) {
1917 instance_offset = i * reg_count;
1918 drm_printf(p, "\nInstance:%d\n", i);
1919
1920 for (j = 0; j < reg_count; j++)
1921 drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_0[j].reg_name,
1922 adev->sdma.ip_dump[instance_offset + j]);
1923 }
1924}
1925
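/**
 * sdma_v5_0_dump_ip_state - capture SDMA register state for debugging
 *
 * @ip_block: pointer to the SDMA IP block
 *
 * Snapshot the registers in sdma_reg_list_5_0 for each SDMA instance
 * into the preallocated ip_dump buffer, with GFXOFF disabled around
 * the register reads.
 */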
1926static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
1927{
1928 struct amdgpu_device *adev = ip_block->adev;
1929 int i, j;
1930 uint32_t instance_offset;
1931 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
1932
1933 if (!adev->sdma.ip_dump)
1934 return;
1935
1936 amdgpu_gfx_off_ctrl(adev, false);
1937 for (i = 0; i < adev->sdma.num_instances; i++) {
1938 instance_offset = i * reg_count;
1939 for (j = 0; j < reg_count; j++)
1940 adev->sdma.ip_dump[instance_offset + j] =
1941 RREG32(sdma_v5_0_get_reg_offset(adev, i,
1942 sdma_reg_list_5_0[j].reg_offset));
1943 }
1944 amdgpu_gfx_off_ctrl(adev, true);
1945}
1946
1947static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
1948 .name = "sdma_v5_0",
1949 .early_init = sdma_v5_0_early_init,
1950 .sw_init = sdma_v5_0_sw_init,
1951 .sw_fini = sdma_v5_0_sw_fini,
1952 .hw_init = sdma_v5_0_hw_init,
1953 .hw_fini = sdma_v5_0_hw_fini,
1954 .suspend = sdma_v5_0_suspend,
1955 .resume = sdma_v5_0_resume,
1956 .is_idle = sdma_v5_0_is_idle,
1957 .wait_for_idle = sdma_v5_0_wait_for_idle,
1958 .soft_reset = sdma_v5_0_soft_reset,
1959 .set_clockgating_state = sdma_v5_0_set_clockgating_state,
1960 .set_powergating_state = sdma_v5_0_set_powergating_state,
1961 .get_clockgating_state = sdma_v5_0_get_clockgating_state,
1962 .dump_ip_state = sdma_v5_0_dump_ip_state,
1963 .print_ip_state = sdma_v5_0_print_ip_state,
1964};
1965
1966static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1967 .type = AMDGPU_RING_TYPE_SDMA,
1968 .align_mask = 0xf,
1969 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1970 .support_64bit_ptrs = true,
1971 .secure_submission_supported = true,
1972 .get_rptr = sdma_v5_0_ring_get_rptr,
1973 .get_wptr = sdma_v5_0_ring_get_wptr,
1974 .set_wptr = sdma_v5_0_ring_set_wptr,
1975 .emit_frame_size =
1976 5 + /* sdma_v5_0_ring_init_cond_exec */
1977 6 + /* sdma_v5_0_ring_emit_hdp_flush */
1978 3 + /* hdp_invalidate */
1979 6 + /* sdma_v5_0_ring_emit_pipeline_sync */
1980 /* sdma_v5_0_ring_emit_vm_flush */
1981 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1982 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
1983 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1984 .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
1985 .emit_ib = sdma_v5_0_ring_emit_ib,
1986 .emit_mem_sync = sdma_v5_0_ring_emit_mem_sync,
1987 .emit_fence = sdma_v5_0_ring_emit_fence,
1988 .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
1989 .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
1990 .emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
1991 .test_ring = sdma_v5_0_ring_test_ring,
1992 .test_ib = sdma_v5_0_ring_test_ib,
1993 .insert_nop = sdma_v5_0_ring_insert_nop,
1994 .pad_ib = sdma_v5_0_ring_pad_ib,
1995 .emit_wreg = sdma_v5_0_ring_emit_wreg,
1996 .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
1997 .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
1998 .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
1999 .preempt_ib = sdma_v5_0_ring_preempt_ib,
2000 .reset = sdma_v5_0_reset_queue,
2001};
2002
2003static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
2004{
2005 int i;
2006
2007 for (i = 0; i < adev->sdma.num_instances; i++) {
2008 adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
2009 adev->sdma.instance[i].ring.me = i;
2010 }
2011}
2012
2013static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
2014 .set = sdma_v5_0_set_trap_irq_state,
2015 .process = sdma_v5_0_process_trap_irq,
2016};
2017
2018static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
2019 .process = sdma_v5_0_process_illegal_inst_irq,
2020};
2021
2022static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
2023{
2024 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
2025 adev->sdma.num_instances;
2026 adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
2027 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
2028}
2029
2030/**
2031 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
2032 *
2033 * @ib: indirect buffer to copy to
2034 * @src_offset: src GPU address
2035 * @dst_offset: dst GPU address
2036 * @byte_count: number of bytes to xfer
2037 * @copy_flags: copy flags for the buffers
2038 *
2039 * Copy GPU buffers using the DMA engine (NAVI10).
2040 * Used by the amdgpu ttm implementation to move pages if
2041 * registered as the asic copy callback.
2042 */
2043static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
2044 uint64_t src_offset,
2045 uint64_t dst_offset,
2046 uint32_t byte_count,
2047 uint32_t copy_flags)
2048{
2049 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2050 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2051 SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
2052 ib->ptr[ib->length_dw++] = byte_count - 1;
2053 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2054 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2055 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2056 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2057 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2058}
2059
2060/**
2061 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
2062 *
2063 * @ib: indirect buffer to fill
2064 * @src_data: value to write to buffer
2065 * @dst_offset: dst GPU address
2066 * @byte_count: number of bytes to xfer
2067 *
2068 * Fill GPU buffers using the DMA engine (NAVI10).
2069 */
2070static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
2071 uint32_t src_data,
2072 uint64_t dst_offset,
2073 uint32_t byte_count)
2074{
2075 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2076 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2077 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2078 ib->ptr[ib->length_dw++] = src_data;
2079 ib->ptr[ib->length_dw++] = byte_count - 1;
2080}
2081
2082static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
2083 .copy_max_bytes = 0x400000,
2084 .copy_num_dw = 7,
2085 .emit_copy_buffer = sdma_v5_0_emit_copy_buffer,
2086
2087 .fill_max_bytes = 0x400000,
2088 .fill_num_dw = 5,
2089 .emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
2090};
2091
2092static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
2093{
2094 if (adev->mman.buffer_funcs == NULL) {
2095 adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
2096 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2097 }
2098}
2099
2100static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
2101 .copy_pte_num_dw = 7,
2102 .copy_pte = sdma_v5_0_vm_copy_pte,
2103 .write_pte = sdma_v5_0_vm_write_pte,
2104 .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
2105};
2106
2107static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
2108{
2109 unsigned i;
2110
2111 if (adev->vm_manager.vm_pte_funcs == NULL) {
2112 adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
2113 for (i = 0; i < adev->sdma.num_instances; i++) {
2114 adev->vm_manager.vm_pte_scheds[i] =
2115 &adev->sdma.instance[i].ring.sched;
2116 }
2117 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
2118 }
2119}
2120
2121const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
2122 .type = AMD_IP_BLOCK_TYPE_SDMA,
2123 .major = 5,
2124 .minor = 0,
2125 .rev = 0,
2126 .funcs = &sdma_v5_0_ip_funcs,
2127};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_1_0_offset.h"
34#include "gc/gc_10_1_0_sh_mask.h"
35#include "hdp/hdp_5_0_0_offset.h"
36#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
37#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
38
39#include "soc15_common.h"
40#include "soc15.h"
41#include "navi10_sdma_pkt_open.h"
42#include "nbio_v2_3.h"
43#include "sdma_v5_0.h"
44
45MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
46MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
47
48MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
49MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
50
51MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
52MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
53
54#define SDMA1_REG_OFFSET 0x600
55#define SDMA0_HYP_DEC_REG_START 0x5880
56#define SDMA0_HYP_DEC_REG_END 0x5893
57#define SDMA1_HYP_DEC_REG_OFFSET 0x20
58
59static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
60static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
61static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
62static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
63
64static const struct soc15_reg_golden golden_settings_sdma_5[] = {
65 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
66 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
67 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
68 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
69 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
70 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
71 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
72 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
73 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
74 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
75 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
76 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
77 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
78 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
80 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
81 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
83 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
84 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
85 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
86 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
87 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
88 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
89};
90
91static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
92 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
93 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
94};
95
96static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
97 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
98 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
99};
100
101static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
102 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
103 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
104};
105
106static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
107{
108 u32 base;
109
110 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
111 internal_offset <= SDMA0_HYP_DEC_REG_END) {
112 base = adev->reg_offset[GC_HWIP][0][1];
113 if (instance == 1)
114 internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
115 } else {
116 base = adev->reg_offset[GC_HWIP][0][0];
117 if (instance == 1)
118 internal_offset += SDMA1_REG_OFFSET;
119 }
120
121 return base + internal_offset;
122}
123
124static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
125{
126 switch (adev->asic_type) {
127 case CHIP_NAVI10:
128 soc15_program_register_sequence(adev,
129 golden_settings_sdma_5,
130 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
131 soc15_program_register_sequence(adev,
132 golden_settings_sdma_nv10,
133 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
134 break;
135 case CHIP_NAVI14:
136 soc15_program_register_sequence(adev,
137 golden_settings_sdma_5,
138 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
139 soc15_program_register_sequence(adev,
140 golden_settings_sdma_nv14,
141 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
142 break;
143 case CHIP_NAVI12:
144 soc15_program_register_sequence(adev,
145 golden_settings_sdma_5,
146 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
147 soc15_program_register_sequence(adev,
148 golden_settings_sdma_nv12,
149 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
150 break;
151 default:
152 break;
153 }
154}
155
156/**
157 * sdma_v5_0_init_microcode - load ucode images from disk
158 *
159 * @adev: amdgpu_device pointer
160 *
161 * Use the firmware interface to load the ucode images into
162 * the driver (not loaded into hw).
163 * Returns 0 on success, error on failure.
164 */
165
166// emulation only, won't work on real chip
167// navi10 real chip need to use PSP to load firmware
168static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
169{
170 const char *chip_name;
171 char fw_name[30];
172 int err = 0, i;
173 struct amdgpu_firmware_info *info = NULL;
174 const struct common_firmware_header *header = NULL;
175 const struct sdma_firmware_header_v1_0 *hdr;
176
177 DRM_DEBUG("\n");
178
179 switch (adev->asic_type) {
180 case CHIP_NAVI10:
181 chip_name = "navi10";
182 break;
183 case CHIP_NAVI14:
184 chip_name = "navi14";
185 break;
186 case CHIP_NAVI12:
187 chip_name = "navi12";
188 break;
189 default:
190 BUG();
191 }
192
193 for (i = 0; i < adev->sdma.num_instances; i++) {
194 if (i == 0)
195 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
196 else
197 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
198 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
199 if (err)
200 goto out;
201 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
202 if (err)
203 goto out;
204 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
205 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
206 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
207 if (adev->sdma.instance[i].feature_version >= 20)
208 adev->sdma.instance[i].burst_nop = true;
209 DRM_DEBUG("psp_load == '%s'\n",
210 adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
211
212 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
213 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
214 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
215 info->fw = adev->sdma.instance[i].fw;
216 header = (const struct common_firmware_header *)info->fw->data;
217 adev->firmware.fw_size +=
218 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
219 }
220 }
221out:
222 if (err) {
223 DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
224 for (i = 0; i < adev->sdma.num_instances; i++) {
225 release_firmware(adev->sdma.instance[i].fw);
226 adev->sdma.instance[i].fw = NULL;
227 }
228 }
229 return err;
230}
231
232static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
233{
234 unsigned ret;
235
236 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
237 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
238 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
239 amdgpu_ring_write(ring, 1);
240 ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
241 amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
242
243 return ret;
244}
245
246static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
247 unsigned offset)
248{
249 unsigned cur;
250
251 BUG_ON(offset > ring->buf_mask);
252 BUG_ON(ring->ring[offset] != 0x55aa55aa);
253
254 cur = (ring->wptr - 1) & ring->buf_mask;
255 if (cur > offset)
256 ring->ring[offset] = cur - offset;
257 else
258 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
259}
260
261/**
262 * sdma_v5_0_ring_get_rptr - get the current read pointer
263 *
264 * @ring: amdgpu ring pointer
265 *
266 * Get the current rptr from the hardware (NAVI10+).
267 */
268static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
269{
270 u64 *rptr;
271
272 /* XXX check if swapping is necessary on BE */
273 rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
274
275 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
276 return ((*rptr) >> 2);
277}
278
279/**
280 * sdma_v5_0_ring_get_wptr - get the current write pointer
281 *
282 * @ring: amdgpu ring pointer
283 *
284 * Get the current wptr from the hardware (NAVI10+).
285 */
286static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
287{
288 struct amdgpu_device *adev = ring->adev;
289 u64 *wptr = NULL;
290 uint64_t local_wptr = 0;
291
292 if (ring->use_doorbell) {
293 /* XXX check if swapping is necessary on BE */
294 wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
295 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
296 *wptr = (*wptr) >> 2;
297 DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
298 } else {
299 u32 lowbit, highbit;
300
301 wptr = &local_wptr;
302 lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
303 highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
304
305 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
306 ring->me, highbit, lowbit);
307 *wptr = highbit;
308 *wptr = (*wptr) << 32;
309 *wptr |= lowbit;
310 }
311
312 return *wptr;
313}
314
315/**
316 * sdma_v5_0_ring_set_wptr - commit the write pointer
317 *
318 * @ring: amdgpu ring pointer
319 *
320 * Write the wptr back to the hardware (NAVI10+).
321 */
322static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
323{
324 struct amdgpu_device *adev = ring->adev;
325
326 DRM_DEBUG("Setting write pointer\n");
327 if (ring->use_doorbell) {
328 DRM_DEBUG("Using doorbell -- "
329 "wptr_offs == 0x%08x "
330 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
331 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
332 ring->wptr_offs,
333 lower_32_bits(ring->wptr << 2),
334 upper_32_bits(ring->wptr << 2));
335 /* XXX check if swapping is necessary on BE */
336 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
337 adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
338 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
339 ring->doorbell_index, ring->wptr << 2);
340 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
341 } else {
342 DRM_DEBUG("Not using doorbell -- "
343 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
344 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
345 ring->me,
346 lower_32_bits(ring->wptr << 2),
347 ring->me,
348 upper_32_bits(ring->wptr << 2));
349 WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
350 lower_32_bits(ring->wptr << 2));
351 WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
352 upper_32_bits(ring->wptr << 2));
353 }
354}
355
356static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
357{
358 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
359 int i;
360
361 for (i = 0; i < count; i++)
362 if (sdma && sdma->burst_nop && (i == 0))
363 amdgpu_ring_write(ring, ring->funcs->nop |
364 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
365 else
366 amdgpu_ring_write(ring, ring->funcs->nop);
367}
368
369/**
370 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
371 *
372 * @ring: amdgpu ring pointer
373 * @ib: IB object to schedule
374 *
375 * Schedule an IB in the DMA ring (NAVI10).
376 */
377static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
378 struct amdgpu_job *job,
379 struct amdgpu_ib *ib,
380 uint32_t flags)
381{
382 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
383 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
384
385 /* IB packet must end on a 8 DW boundary */
386 sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
387
388 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
389 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
390 /* base must be 32 byte aligned */
391 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
392 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
393 amdgpu_ring_write(ring, ib->length_dw);
394 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
395 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
396}
397
398/**
399 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
400 *
401 * @ring: amdgpu ring pointer
402 *
403 * Emit an hdp flush packet on the requested DMA ring.
404 */
405static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
406{
407 struct amdgpu_device *adev = ring->adev;
408 u32 ref_and_mask = 0;
409 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
410
411 if (ring->me == 0)
412 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
413 else
414 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
415
416 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
417 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
418 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
419 amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
420 amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
421 amdgpu_ring_write(ring, ref_and_mask); /* reference */
422 amdgpu_ring_write(ring, ref_and_mask); /* mask */
423 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
424 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
425}
426
427/**
428 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
429 *
430 * @ring: amdgpu ring pointer
431 * @fence: amdgpu fence object
432 *
433 * Add a DMA fence packet to the ring to write
434 * the fence seq number and DMA trap packet to generate
435 * an interrupt if needed (NAVI10).
436 */
437static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
438 unsigned flags)
439{
440 struct amdgpu_device *adev = ring->adev;
441 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
442 /* write the fence */
443 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
444 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
445 /* zero in first two bits */
446 BUG_ON(addr & 0x3);
447 amdgpu_ring_write(ring, lower_32_bits(addr));
448 amdgpu_ring_write(ring, upper_32_bits(addr));
449 amdgpu_ring_write(ring, lower_32_bits(seq));
450
451 /* optionally write high bits as well */
452 if (write64bit) {
453 addr += 4;
454 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
455 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
456 /* zero in first two bits */
457 BUG_ON(addr & 0x3);
458 amdgpu_ring_write(ring, lower_32_bits(addr));
459 amdgpu_ring_write(ring, upper_32_bits(addr));
460 amdgpu_ring_write(ring, upper_32_bits(seq));
461 }
462
463 /* Interrupt not work fine on GFX10.1 model yet. Use fallback instead */
464 if ((flags & AMDGPU_FENCE_FLAG_INT) && adev->pdev->device != 0x50) {
465 /* generate an interrupt */
466 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
467 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
468 }
469}
470
471
472/**
473 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
474 *
475 * @adev: amdgpu_device pointer
476 *
477 * Stop the gfx async dma ring buffers (NAVI10).
478 */
479static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
480{
481 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
482 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
483 u32 rb_cntl, ib_cntl;
484 int i;
485
486 if ((adev->mman.buffer_funcs_ring == sdma0) ||
487 (adev->mman.buffer_funcs_ring == sdma1))
488 amdgpu_ttm_set_buffer_funcs_status(adev, false);
489
490 for (i = 0; i < adev->sdma.num_instances; i++) {
491 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
492 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
493 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
494 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
495 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
496 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
497 }
498
499 sdma0->sched.ready = false;
500 sdma1->sched.ready = false;
501}
502
503/**
504 * sdma_v5_0_rlc_stop - stop the compute async dma engines
505 *
506 * @adev: amdgpu_device pointer
507 *
508 * Stop the compute async dma queues (NAVI10).
509 */
510static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
511{
512 /* XXX todo */
513}
514
515/**
516 * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
517 *
518 * @adev: amdgpu_device pointer
519 * @enable: enable/disable the DMA MEs context switch.
520 *
521 * Halt or unhalt the async dma engines context switch (NAVI10).
522 */
523static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
524{
525 u32 f32_cntl, phase_quantum = 0;
526 int i;
527
528 if (amdgpu_sdma_phase_quantum) {
529 unsigned value = amdgpu_sdma_phase_quantum;
530 unsigned unit = 0;
531
532 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
533 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
534 value = (value + 1) >> 1;
535 unit++;
536 }
537 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
538 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
539 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
540 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
541 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
542 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
543 WARN_ONCE(1,
544 "clamping sdma_phase_quantum to %uK clock cycles\n",
545 value << unit);
546 }
547 phase_quantum =
548 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
549 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
550 }
551
552 for (i = 0; i < adev->sdma.num_instances; i++) {
553 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
554 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
555 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
556 if (enable && amdgpu_sdma_phase_quantum) {
557 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
558 phase_quantum);
559 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
560 phase_quantum);
561 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
562 phase_quantum);
563 }
564 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
565 }
566
567}
568
569/**
570 * sdma_v5_0_enable - stop the async dma engines
571 *
572 * @adev: amdgpu_device pointer
573 * @enable: enable/disable the DMA MEs.
574 *
575 * Halt or unhalt the async dma engines (NAVI10).
576 */
577static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
578{
579 u32 f32_cntl;
580 int i;
581
582 if (enable == false) {
583 sdma_v5_0_gfx_stop(adev);
584 sdma_v5_0_rlc_stop(adev);
585 }
586
587 for (i = 0; i < adev->sdma.num_instances; i++) {
588 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
589 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
590 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
591 }
592}
593
594/**
595 * sdma_v5_0_gfx_resume - setup and start the async dma engines
596 *
597 * @adev: amdgpu_device pointer
598 *
599 * Set up the gfx DMA ring buffers and enable them (NAVI10).
600 * Returns 0 for success, error for failure.
601 */
602static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
603{
604 struct amdgpu_ring *ring;
605 u32 rb_cntl, ib_cntl;
606 u32 rb_bufsz;
607 u32 wb_offset;
608 u32 doorbell;
609 u32 doorbell_offset;
610 u32 temp;
611 u32 wptr_poll_cntl;
612 u64 wptr_gpu_addr;
613 int i, r;
614
615 for (i = 0; i < adev->sdma.num_instances; i++) {
616 ring = &adev->sdma.instance[i].ring;
617 wb_offset = (ring->rptr_offs * 4);
618
619 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
620
621 /* Set ring buffer size in dwords */
622 rb_bufsz = order_base_2(ring->ring_size / 4);
623 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
624 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
625#ifdef __BIG_ENDIAN
626 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
627 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
628 RPTR_WRITEBACK_SWAP_ENABLE, 1);
629#endif
630 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
631
632 /* Initialize the ring buffer's read and write pointers */
633 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
634 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
635 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
636 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
637
638 /* setup the wptr shadow polling */
639 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
640 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
641 lower_32_bits(wptr_gpu_addr));
642 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
643 upper_32_bits(wptr_gpu_addr));
644 wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
645 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
646 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
647 SDMA0_GFX_RB_WPTR_POLL_CNTL,
648 F32_POLL_ENABLE, 1);
649 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
650 wptr_poll_cntl);
651
652 /* set the wb address whether it's enabled or not */
653 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
654 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
655 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
656 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
657
658 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
659
660 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
661 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
662
663 ring->wptr = 0;
664
665 /* before programing wptr to a less value, need set minor_ptr_update first */
666 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
667
668 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
669 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
670 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
671 }
672
673 doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
674 doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
675
676 if (ring->use_doorbell) {
677 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
678 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
679 OFFSET, ring->doorbell_index);
680 } else {
681 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
682 }
683 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
684 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
685
686 adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
687 ring->doorbell_index, 20);
688
689 if (amdgpu_sriov_vf(adev))
690 sdma_v5_0_ring_set_wptr(ring);
691
692 /* set minor_ptr_update to 0 after wptr programed */
693 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
694
695 /* set utc l1 enable flag always to 1 */
696 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
697 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
698
699 /* enable MCBP */
700 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
701 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
702
703 /* Set up RESP_MODE to non-copy addresses */
704 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
705 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
706 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
707 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
708
709 /* program default cache read and write policy */
710 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
711 /* clean read policy and write policy bits */
712 temp &= 0xFF0FFF;
713 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
714 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
715
716 if (!amdgpu_sriov_vf(adev)) {
717 /* unhalt engine */
718 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
719 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
720 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
721 }
722
723 /* enable DMA RB */
724 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
725 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
726
727 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
728 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
729#ifdef __BIG_ENDIAN
730 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
731#endif
732 /* enable DMA IBs */
733 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
734
735 ring->sched.ready = true;
736
737 if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
738 sdma_v5_0_ctx_switch_enable(adev, true);
739 sdma_v5_0_enable(adev, true);
740 }
741
742 r = amdgpu_ring_test_ring(ring);
743 if (r) {
744 ring->sched.ready = false;
745 return r;
746 }
747
748 if (adev->mman.buffer_funcs_ring == ring)
749 amdgpu_ttm_set_buffer_funcs_status(adev, true);
750 }
751
752 return 0;
753}
754
755/**
756 * sdma_v5_0_rlc_resume - setup and start the async dma engines
757 *
758 * @adev: amdgpu_device pointer
759 *
760 * Set up the compute DMA queues and enable them (NAVI10).
761 * Returns 0 for success, error for failure.
762 */
763static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
764{
765 return 0;
766}
767
768/**
769 * sdma_v5_0_load_microcode - load the sDMA ME ucode
770 *
771 * @adev: amdgpu_device pointer
772 *
773 * Loads the sDMA0/1 ucode.
774 * Returns 0 for success, -EINVAL if the ucode is not available.
775 */
776static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
777{
778 const struct sdma_firmware_header_v1_0 *hdr;
779 const __le32 *fw_data;
780 u32 fw_size;
781 int i, j;
782
783 /* halt the MEs */
784 sdma_v5_0_enable(adev, false);
785
786 for (i = 0; i < adev->sdma.num_instances; i++) {
787 if (!adev->sdma.instance[i].fw)
788 return -EINVAL;
789
790 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
791 amdgpu_ucode_print_sdma_hdr(&hdr->header);
792 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
793
794 fw_data = (const __le32 *)
795 (adev->sdma.instance[i].fw->data +
796 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
797
798 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
799
800 for (j = 0; j < fw_size; j++) {
801 if (amdgpu_emu_mode == 1 && j % 500 == 0)
802 msleep(1);
803 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
804 }
805
806 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
807 }
808
809 return 0;
810}
811
812/**
813 * sdma_v5_0_start - setup and start the async dma engines
814 *
815 * @adev: amdgpu_device pointer
816 *
817 * Set up the DMA engines and enable them (NAVI10).
818 * Returns 0 for success, error for failure.
819 */
820static int sdma_v5_0_start(struct amdgpu_device *adev)
821{
822 int r = 0;
823
824 if (amdgpu_sriov_vf(adev)) {
825 sdma_v5_0_ctx_switch_enable(adev, false);
826 sdma_v5_0_enable(adev, false);
827
828 /* set RB registers */
829 r = sdma_v5_0_gfx_resume(adev);
830 return r;
831 }
832
833 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
834 r = sdma_v5_0_load_microcode(adev);
835 if (r)
836 return r;
837
838 /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
839 if (amdgpu_emu_mode == 1 && adev->pdev->device == 0x4d)
840 msleep(1000);
841 }
842
843 /* unhalt the MEs */
844 sdma_v5_0_enable(adev, true);
845 /* enable sdma ring preemption */
846 sdma_v5_0_ctx_switch_enable(adev, true);
847
848 /* start the gfx rings and rlc compute queues */
849 r = sdma_v5_0_gfx_resume(adev);
850 if (r)
851 return r;
852 r = sdma_v5_0_rlc_resume(adev);
853
854 return r;
855}
856
857/**
858 * sdma_v5_0_ring_test_ring - simple async dma engine test
859 *
860 * @ring: amdgpu_ring structure holding ring information
861 *
862 * Test the DMA engine by writing using it to write an
863 * value to memory. (NAVI10).
864 * Returns 0 for success, error for failure.
865 */
866static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
867{
868 struct amdgpu_device *adev = ring->adev;
869 unsigned i;
870 unsigned index;
871 int r;
872 u32 tmp;
873 u64 gpu_addr;
874
875 r = amdgpu_device_wb_get(adev, &index);
876 if (r) {
877 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
878 return r;
879 }
880
881 gpu_addr = adev->wb.gpu_addr + (index * 4);
882 tmp = 0xCAFEDEAD;
883 adev->wb.wb[index] = cpu_to_le32(tmp);
884
885 r = amdgpu_ring_alloc(ring, 5);
886 if (r) {
887 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
888 amdgpu_device_wb_free(adev, index);
889 return r;
890 }
891
892 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
893 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
894 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
895 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
896 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
897 amdgpu_ring_write(ring, 0xDEADBEEF);
898 amdgpu_ring_commit(ring);
899
900 for (i = 0; i < adev->usec_timeout; i++) {
901 tmp = le32_to_cpu(adev->wb.wb[index]);
902 if (tmp == 0xDEADBEEF)
903 break;
904 if (amdgpu_emu_mode == 1)
905 msleep(1);
906 else
907 udelay(1);
908 }
909
910 if (i < adev->usec_timeout) {
911 if (amdgpu_emu_mode == 1)
912 DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
913 else
914 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
915 } else {
916 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
917 ring->idx, tmp);
918 r = -EINVAL;
919 }
920 amdgpu_device_wb_free(adev, index);
921
922 return r;
923}
924
925/**
926 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
927 *
928 * @ring: amdgpu_ring structure holding ring information
929 *
930 * Test a simple IB in the DMA ring (NAVI10).
931 * Returns 0 on success, error on failure.
932 */
933static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
934{
935 struct amdgpu_device *adev = ring->adev;
936 struct amdgpu_ib ib;
937 struct dma_fence *f = NULL;
938 unsigned index;
939 long r;
940 u32 tmp = 0;
941 u64 gpu_addr;
942
943 r = amdgpu_device_wb_get(adev, &index);
944 if (r) {
945 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
946 return r;
947 }
948
949 gpu_addr = adev->wb.gpu_addr + (index * 4);
950 tmp = 0xCAFEDEAD;
951 adev->wb.wb[index] = cpu_to_le32(tmp);
952 memset(&ib, 0, sizeof(ib));
953 r = amdgpu_ib_get(adev, NULL, 256, &ib);
954 if (r) {
955 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
956 goto err0;
957 }
958
959 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
960 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
961 ib.ptr[1] = lower_32_bits(gpu_addr);
962 ib.ptr[2] = upper_32_bits(gpu_addr);
963 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
964 ib.ptr[4] = 0xDEADBEEF;
965 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
966 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
967 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
968 ib.length_dw = 8;
969
970 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
971 if (r)
972 goto err1;
973
974 r = dma_fence_wait_timeout(f, false, timeout);
975 if (r == 0) {
976 DRM_ERROR("amdgpu: IB test timed out\n");
977 r = -ETIMEDOUT;
978 goto err1;
979 } else if (r < 0) {
980 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
981 goto err1;
982 }
983 tmp = le32_to_cpu(adev->wb.wb[index]);
984 if (tmp == 0xDEADBEEF) {
985 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
986 r = 0;
987 } else {
988 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
989 r = -EINVAL;
990 }
991
992err1:
993 amdgpu_ib_free(adev, &ib, NULL);
994 dma_fence_put(f);
995err0:
996 amdgpu_device_wb_free(adev, index);
997 return r;
998}
999
1000
1001/**
1002 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
1003 *
1004 * @ib: indirect buffer to fill with commands
1005 * @pe: addr of the page entry
1006 * @src: src addr to copy from
1007 * @count: number of page entries to update
1008 *
1009 * Update PTEs by copying them from the GART using sDMA (NAVI10).
1010 */
1011static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
1012 uint64_t pe, uint64_t src,
1013 unsigned count)
1014{
1015 unsigned bytes = count * 8;
1016
1017 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1018 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1019 ib->ptr[ib->length_dw++] = bytes - 1;
1020 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1021 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1022 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1023 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1024 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1025
1026}
1027
1028/**
1029 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
1030 *
1031 * @ib: indirect buffer to fill with commands
1032 * @pe: addr of the page entry
1033 * @addr: dst addr to write into pe
1034 * @count: number of page entries to update
1035 * @incr: increase next addr by incr bytes
1036 * @flags: access flags
1037 *
1038 * Update PTEs by writing them manually using sDMA (NAVI10).
1039 */
1040static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1041 uint64_t value, unsigned count,
1042 uint32_t incr)
1043{
1044 unsigned ndw = count * 2;
1045
1046 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1047 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1048 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1049 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1050 ib->ptr[ib->length_dw++] = ndw - 1;
1051 for (; ndw > 0; ndw -= 2) {
1052 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1053 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1054 value += incr;
1055 }
1056}
1057
1058/**
1059 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1060 *
1061 * @ib: indirect buffer to fill with commands
1062 * @pe: addr of the page entry
1063 * @addr: dst addr to write into pe
1064 * @count: number of page entries to update
1065 * @incr: increase next addr by incr bytes
1066 * @flags: access flags
1067 *
1068 * Update the page tables using sDMA (NAVI10).
1069 */
1070static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1071 uint64_t pe,
1072 uint64_t addr, unsigned count,
1073 uint32_t incr, uint64_t flags)
1074{
1075 /* for physically contiguous pages (vram) */
1076 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1077 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1078 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1079 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1080 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1081 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1082 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1083 ib->ptr[ib->length_dw++] = incr; /* increment size */
1084 ib->ptr[ib->length_dw++] = 0;
1085 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1086}
1087
1088/**
1089 * sdma_v5_0_ring_pad_ib - pad the IB to a multiple of 8 dwords
1090 *
1091 * @ring: amdgpu_ring structure holding ring information
1092 * @ib: indirect buffer to fill with padding
1093 */
1094static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1095{
1096 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1097 u32 pad_count;
1098 int i;
1099
1100 pad_count = (8 - (ib->length_dw & 0x7)) % 8;
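 /* pad to a multiple of 8 dwords; with burst NOP support the first padding dword is a NOP whose COUNT field covers the rest */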
1101 for (i = 0; i < pad_count; i++)
1102 if (sdma && sdma->burst_nop && (i == 0))
1103 ib->ptr[ib->length_dw++] =
1104 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1105 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1106 else
1107 ib->ptr[ib->length_dw++] =
1108 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1109}
1110
1112/**
1113 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1114 *
1115 * @ring: amdgpu_ring pointer
1116 *
1117 * Make sure all previous operations are completed (NAVI10).
1118 */
1119static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1120{
1121 uint32_t seq = ring->fence_drv.sync_seq;
1122 uint64_t addr = ring->fence_drv.gpu_addr;
1123
1124 /* poll the fence memory location until it reaches sync_seq (func 3 = equal) */
1125 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1126 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1127 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1128 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1129 amdgpu_ring_write(ring, addr & 0xfffffffc);
1130 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1131 amdgpu_ring_write(ring, seq); /* reference */
1132 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1133 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1134 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1135}
1136
1138/**
1139 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1140 *
1141 * @ring: amdgpu_ring pointer
1142 * @vmid: vmid number to use
 * @pd_addr: page directory base address
1143 *
1144 * Update the page table base and flush the VM TLB
1145 * using sDMA (NAVI10).
1146 */
1147static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1148 unsigned vmid, uint64_t pd_addr)
1149{
1150 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1151}
1152
1153static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1154 uint32_t reg, uint32_t val)
1155{
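 /* SRBM_WRITE with byte enable 0xf updates all four bytes of the target register */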
1156 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1157 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1158 amdgpu_ring_write(ring, reg);
1159 amdgpu_ring_write(ring, val);
1160}
1161
1162static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1163 uint32_t val, uint32_t mask)
1164{
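 /* POLL_REGMEM on a register: wait until (reg value & mask) == val; func 3 means equal */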
1165 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1166 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1167 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1168 amdgpu_ring_write(ring, reg << 2);
1169 amdgpu_ring_write(ring, 0);
1170 amdgpu_ring_write(ring, val); /* reference */
1171 amdgpu_ring_write(ring, mask); /* mask */
1172 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1173 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1174}
1175
1176static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1177 uint32_t reg0, uint32_t reg1,
1178 uint32_t ref, uint32_t mask)
1179{
1180 amdgpu_ring_emit_wreg(ring, reg0, ref);
1181 /* wait for a cycle to reset vm_inv_eng*_ack */
1182 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1183 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1184}
1185
1186static int sdma_v5_0_early_init(void *handle)
1187{
1188 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1189
1190 adev->sdma.num_instances = 2;
1191
1192 sdma_v5_0_set_ring_funcs(adev);
1193 sdma_v5_0_set_buffer_funcs(adev);
1194 sdma_v5_0_set_vm_pte_funcs(adev);
1195 sdma_v5_0_set_irq_funcs(adev);
1196
1197 return 0;
1198}
1199
1201static int sdma_v5_0_sw_init(void *handle)
1202{
1203 struct amdgpu_ring *ring;
1204 int r, i;
1205 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1206
1207 /* SDMA0 trap event */
1208 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1209 SDMA0_5_0__SRCID__SDMA_TRAP,
1210 &adev->sdma.trap_irq);
1211 if (r)
1212 return r;
1213
1214 /* SDMA1 trap event */
1215 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1216 SDMA1_5_0__SRCID__SDMA_TRAP,
1217 &adev->sdma.trap_irq);
1218 if (r)
1219 return r;
1220
1221 r = sdma_v5_0_init_microcode(adev);
1222 if (r) {
1223 DRM_ERROR("Failed to load sdma firmware!\n");
1224 return r;
1225 }
1226
1227 for (i = 0; i < adev->sdma.num_instances; i++) {
1228 ring = &adev->sdma.instance[i].ring;
1229 ring->ring_obj = NULL;
1230 ring->use_doorbell = true;
1231
1232 DRM_INFO("use_doorbell being set to: [%s]\n",
1233 ring->use_doorbell ? "true" : "false");
1234
1235 ring->doorbell_index = (i == 0) ?
1236 (adev->doorbell_index.sdma_engine[0] << 1) /* DWORD offset */
1237 : (adev->doorbell_index.sdma_engine[1] << 1); /* DWORD offset */
1238
1239 sprintf(ring->name, "sdma%d", i);
1240 r = amdgpu_ring_init(adev, ring, 1024,
1241 &adev->sdma.trap_irq,
1242 (i == 0) ?
1243 AMDGPU_SDMA_IRQ_INSTANCE0 :
1244 AMDGPU_SDMA_IRQ_INSTANCE1);
1245 if (r)
1246 return r;
1247 }
1248
1249 return r;
1250}
1251
1252static int sdma_v5_0_sw_fini(void *handle)
1253{
1254 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1255 int i;
1256
1257 for (i = 0; i < adev->sdma.num_instances; i++)
1258 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1259
1260 return 0;
1261}
1262
1263static int sdma_v5_0_hw_init(void *handle)
1264{
1265 int r;
1266 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1267
1268 sdma_v5_0_init_golden_registers(adev);
1269
1270 r = sdma_v5_0_start(adev);
1271
1272 return r;
1273}
1274
1275static int sdma_v5_0_hw_fini(void *handle)
1276{
1277 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1278
1279 if (amdgpu_sriov_vf(adev))
1280 return 0;
1281
1282 sdma_v5_0_ctx_switch_enable(adev, false);
1283 sdma_v5_0_enable(adev, false);
1284
1285 return 0;
1286}
1287
1288static int sdma_v5_0_suspend(void *handle)
1289{
1290 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1291
1292 return sdma_v5_0_hw_fini(adev);
1293}
1294
1295static int sdma_v5_0_resume(void *handle)
1296{
1297 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1298
1299 return sdma_v5_0_hw_init(adev);
1300}
1301
1302static bool sdma_v5_0_is_idle(void *handle)
1303{
1304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 u32 i;
1306
1307 for (i = 0; i < adev->sdma.num_instances; i++) {
1308 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1309
1310 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1311 return false;
1312 }
1313
1314 return true;
1315}
1316
1317static int sdma_v5_0_wait_for_idle(void *handle)
1318{
1319 unsigned i;
1320 u32 sdma0, sdma1;
1321 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1322
1323 for (i = 0; i < adev->usec_timeout; i++) {
1324 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1325 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1326
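 /* AND the status words: the IDLE bit survives only if both engines report idle */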
1327 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1328 return 0;
1329 udelay(1);
1330 }
1331 return -ETIMEDOUT;
1332}
1333
1334static int sdma_v5_0_soft_reset(void *handle)
1335{
1336 /* todo */
1337
1338 return 0;
1339}
1340
1341static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
1342{
1343 int i, r = 0;
1344 struct amdgpu_device *adev = ring->adev;
1345 u32 index = 0;
1346 u64 sdma_gfx_preempt;
1347
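 /* preemption flow: drop the preempt cond_exec flag, emit a trailing fence, assert GFX_PREEMPT, then poll the trailing fence to confirm the queue was preempted */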
1348 amdgpu_sdma_get_index_from_ring(ring, &index);
1349 if (index == 0)
1350 sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
1351 else
1352 sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
1353
1354 /* assert preemption condition */
1355 amdgpu_ring_set_preempt_cond_exec(ring, false);
1356
1357 /* emit the trailing fence */
1358 ring->trail_seq += 1;
1359 amdgpu_ring_alloc(ring, 10);
1360 sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1361 ring->trail_seq, 0);
1362 amdgpu_ring_commit(ring);
1363
1364 /* assert IB preemption */
1365 WREG32(sdma_gfx_preempt, 1);
1366
1367 /* poll the trailing fence */
1368 for (i = 0; i < adev->usec_timeout; i++) {
1369 if (ring->trail_seq ==
1370 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1371 break;
1372 udelay(1);
1373 }
1374
1375 if (i >= adev->usec_timeout) {
1376 r = -EINVAL;
1377 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1378 }
1379
1380 /* deassert IB preemption */
1381 WREG32(sdma_gfx_preempt, 0);
1382
1383 /* deassert the preemption condition */
1384 amdgpu_ring_set_preempt_cond_exec(ring, true);
1385 return r;
1386}
1387
1388static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
1389 struct amdgpu_irq_src *source,
1390 unsigned type,
1391 enum amdgpu_interrupt_state state)
1392{
1393 u32 sdma_cntl;
1394
1395 u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
1396 sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
1397 sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
1398
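 /* toggle TRAP_ENABLE in the selected instance's SDMA_CNTL register */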
1399 sdma_cntl = RREG32(reg_offset);
1400 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1401 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1402 WREG32(reg_offset, sdma_cntl);
1403
1404 return 0;
1405}
1406
1407static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
1408 struct amdgpu_irq_src *source,
1409 struct amdgpu_iv_entry *entry)
1410{
1411 DRM_DEBUG("IH: SDMA trap\n");
1412 switch (entry->client_id) {
1413 case SOC15_IH_CLIENTID_SDMA0:
1414 switch (entry->ring_id) {
1415 case 0:
1416 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1417 break;
1418 case 1:
1419 /* XXX compute */
1420 break;
1421 case 2:
1422 /* XXX compute */
1423 break;
1424 case 3:
1425 /* XXX page queue*/
1426 break;
1427 }
1428 break;
1429 case SOC15_IH_CLIENTID_SDMA1:
1430 switch (entry->ring_id) {
1431 case 0:
1432 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1433 break;
1434 case 1:
1435 /* XXX compute */
1436 break;
1437 case 2:
1438 /* XXX compute */
1439 break;
1440 case 3:
1441 /* XXX page queue*/
1442 break;
1443 }
1444 break;
1445 }
1446 return 0;
1447}
1448
1449static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1450 struct amdgpu_irq_src *source,
1451 struct amdgpu_iv_entry *entry)
1452{
1453 return 0;
1454}
1455
1456static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1457 bool enable)
1458{
1459 uint32_t data, def;
1460 int i;
1461
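 /* the SOFT_OVERRIDE bits defeat clock gating when set; clearing them lets MGCG gate the SDMA clocks */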
1462 for (i = 0; i < adev->sdma.num_instances; i++) {
1463 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1464 /* Enable sdma clock gating */
1465 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1466 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1467 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1468 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1469 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1470 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1471 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1472 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1473 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1474 if (def != data)
1475 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1476 } else {
1477 /* Disable sdma clock gating */
1478 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1479 data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1480 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1481 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1482 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1483 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1484 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1485 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1486 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1487 if (def != data)
1488 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1489 }
1490 }
1491}
1492
1493static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1494 bool enable)
1495{
1496 uint32_t data, def;
1497 int i;
1498
1499 for (i = 0; i < adev->sdma.num_instances; i++) {
1500 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1501 /* Enable sdma mem light sleep */
1502 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1503 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1504 if (def != data)
1505 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1506
1507 } else {
1508 /* Disable sdma mem light sleep */
1509 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1510 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1511 if (def != data)
1512 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1513
1514 }
1515 }
1516}
1517
1518static int sdma_v5_0_set_clockgating_state(void *handle,
1519 enum amd_clockgating_state state)
1520{
1521 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1522
1523 if (amdgpu_sriov_vf(adev))
1524 return 0;
1525
1526 switch (adev->asic_type) {
1527 case CHIP_NAVI10:
1528 case CHIP_NAVI14:
1529 case CHIP_NAVI12:
1530 sdma_v5_0_update_medium_grain_clock_gating(adev,
1531 state == AMD_CG_STATE_GATE);
1532 sdma_v5_0_update_medium_grain_light_sleep(adev,
1533 state == AMD_CG_STATE_GATE);
1534 break;
1535 default:
1536 break;
1537 }
1538
1539 return 0;
1540}
1541
1542static int sdma_v5_0_set_powergating_state(void *handle,
1543 enum amd_powergating_state state)
1544{
1545 return 0;
1546}
1547
1548static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
1549{
1550 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 int data;
1552
1553 if (amdgpu_sriov_vf(adev))
1554 *flags = 0;
1555
1556 /* AMD_CG_SUPPORT_SDMA_MGCG */
1557 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1558 if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
1559 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1560
1561 /* AMD_CG_SUPPORT_SDMA_LS */
1562 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1563 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1564 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1565}
1566
1567const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
1568 .name = "sdma_v5_0",
1569 .early_init = sdma_v5_0_early_init,
1570 .late_init = NULL,
1571 .sw_init = sdma_v5_0_sw_init,
1572 .sw_fini = sdma_v5_0_sw_fini,
1573 .hw_init = sdma_v5_0_hw_init,
1574 .hw_fini = sdma_v5_0_hw_fini,
1575 .suspend = sdma_v5_0_suspend,
1576 .resume = sdma_v5_0_resume,
1577 .is_idle = sdma_v5_0_is_idle,
1578 .wait_for_idle = sdma_v5_0_wait_for_idle,
1579 .soft_reset = sdma_v5_0_soft_reset,
1580 .set_clockgating_state = sdma_v5_0_set_clockgating_state,
1581 .set_powergating_state = sdma_v5_0_set_powergating_state,
1582 .get_clockgating_state = sdma_v5_0_get_clockgating_state,
1583};
1584
1585static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1586 .type = AMDGPU_RING_TYPE_SDMA,
1587 .align_mask = 0xf,
1588 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1589 .support_64bit_ptrs = true,
1590 .vmhub = AMDGPU_GFXHUB_0,
1591 .get_rptr = sdma_v5_0_ring_get_rptr,
1592 .get_wptr = sdma_v5_0_ring_get_wptr,
1593 .set_wptr = sdma_v5_0_ring_set_wptr,
1594 .emit_frame_size =
1595 5 + /* sdma_v5_0_ring_init_cond_exec */
1596 6 + /* sdma_v5_0_ring_emit_hdp_flush */
1597 3 + /* hdp_invalidate */
1598 6 + /* sdma_v5_0_ring_emit_pipeline_sync */
1599 /* sdma_v5_0_ring_emit_vm_flush */
1600 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1601 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
1602 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1603 .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
1604 .emit_ib = sdma_v5_0_ring_emit_ib,
1605 .emit_fence = sdma_v5_0_ring_emit_fence,
1606 .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
1607 .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
1608 .emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
1609 .test_ring = sdma_v5_0_ring_test_ring,
1610 .test_ib = sdma_v5_0_ring_test_ib,
1611 .insert_nop = sdma_v5_0_ring_insert_nop,
1612 .pad_ib = sdma_v5_0_ring_pad_ib,
1613 .emit_wreg = sdma_v5_0_ring_emit_wreg,
1614 .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
1615 .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
1616 .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
1617 .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
1618 .preempt_ib = sdma_v5_0_ring_preempt_ib,
1619};
1620
1621static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
1622{
1623 int i;
1624
1625 for (i = 0; i < adev->sdma.num_instances; i++) {
1626 adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
1627 adev->sdma.instance[i].ring.me = i;
1628 }
1629}
1630
1631static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
1632 .set = sdma_v5_0_set_trap_irq_state,
1633 .process = sdma_v5_0_process_trap_irq,
1634};
1635
1636static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
1637 .process = sdma_v5_0_process_illegal_inst_irq,
1638};
1639
1640static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
1641{
1642 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1643 adev->sdma.num_instances;
1644 adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
1645 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
1646}
1647
1648/**
1649 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
1650 *
1651 * @ib: indirect buffer to fill with commands
1652 * @src_offset: src GPU address
1653 * @dst_offset: dst GPU address
1654 * @byte_count: number of bytes to xfer
1655 *
1656 * Copy GPU buffers using the DMA engine (NAVI10).
1657 * Used by the amdgpu ttm implementation to move pages if
1658 * registered as the asic copy callback.
1659 */
1660static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
1661 uint64_t src_offset,
1662 uint64_t dst_offset,
1663 uint32_t byte_count)
1664{
1665 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1666 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1667 ib->ptr[ib->length_dw++] = byte_count - 1;
1668 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1669 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1670 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1671 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1672 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1673}
1674
1675/**
1676 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
1677 *
1678 * @ib: indirect buffer to fill with commands
1679 * @src_data: value to write to buffer
1680 * @dst_offset: dst GPU address
1681 * @byte_count: number of bytes to xfer
1682 *
1683 * Fill GPU buffers using the DMA engine (NAVI10).
1684 */
1685static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
1686 uint32_t src_data,
1687 uint64_t dst_offset,
1688 uint32_t byte_count)
1689{
1690 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1691 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1692 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1693 ib->ptr[ib->length_dw++] = src_data;
1694 ib->ptr[ib->length_dw++] = byte_count - 1;
1695}
1696
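/* the dword budgets below must match what sdma_v5_0_emit_copy_buffer (7 dwords)
 * and sdma_v5_0_emit_fill_buffer (5 dwords) actually emit */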
1697static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
1698 .copy_max_bytes = 0x400000,
1699 .copy_num_dw = 7,
1700 .emit_copy_buffer = sdma_v5_0_emit_copy_buffer,
1701
1702 .fill_max_bytes = 0x400000,
1703 .fill_num_dw = 5,
1704 .emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
1705};
1706
1707static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
1708{
1709 if (adev->mman.buffer_funcs == NULL) {
1710 adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
1711 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1712 }
1713}
1714
1715static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
1716 .copy_pte_num_dw = 7,
1717 .copy_pte = sdma_v5_0_vm_copy_pte,
1718 .write_pte = sdma_v5_0_vm_write_pte,
1719 .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
1720};
1721
1722static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1723{
1724 struct drm_gpu_scheduler *sched;
1725 unsigned i;
1726
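 /* route page-table updates to the kernel-priority run queue of every SDMA ring */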
1727 if (adev->vm_manager.vm_pte_funcs == NULL) {
1728 adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
1729 for (i = 0; i < adev->sdma.num_instances; i++) {
1730 sched = &adev->sdma.instance[i].ring.sched;
1731 adev->vm_manager.vm_pte_rqs[i] =
1732 &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1733 }
1734 adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
1735 }
1736}
1737
1738const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
1739 .type = AMD_IP_BLOCK_TYPE_SDMA,
1740 .major = 5,
1741 .minor = 0,
1742 .rev = 0,
1743 .funcs = &sdma_v5_0_ip_funcs,
1744};