/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */

#define XCC_REG_RANGE_0_LOW  0x2000   /* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3400   /* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000   /* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000  /* XCC gfxdec1 upper bound */

#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)

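/* GC status registers captured in the GFX IP dump (see gfx_v9_4_3_alloc_ip_dump()) */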
static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask:0, queue_type:0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0); /* oac mask */
	amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}

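/*
 * Ask the KIQ to map a compute queue: the MAP_QUEUES packet hands the CP the
 * queue's doorbell, MQD address and wptr polling address so the HQD can be
 * loaded and scheduled by firmware.
 */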
static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr,
					u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

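/*
 * Direct (register-level) reset of a hung compute queue: select the queue
 * via SRBM, request a dequeue through CP_HQD_DEQUEUE_REQUEST plus
 * SPI_COMPUTE_QUEUE_RESET, then poll CP_HQD_ACTIVE until the HQD goes idle.
 * Done under RLC safe mode, presumably so the RLC does not touch GRBM state
 * while the queue registers are banked in.
 */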
static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					  uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for HQD to deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		/* Golden settings applied by driver for ASIC with rev_id 0 */
		if (adev->rev_id == 0) {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
					      REDUCE_FIFO_DEPTH_BY_2, 2);
		} else {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
					      SPARE, 0x1);
		}
	}
}

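/*
 * PM4 packets must carry XCC-local register offsets. For registers inside
 * the two XCC gfxdec ranges, keep only the lower 16 bits of the offset so
 * the packet addresses the local XCC instance; any other register offset is
 * returned unchanged.
 */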
static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC reg, normalize the reg to keep
	 * lower 16 bits in local xcc range
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	/* Only do the normalization on regspace */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

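/*
 * Ring liveness test: seed SCRATCH_REG0 with 0xCAFEDEAD, emit a
 * SET_UCONFIG_REG packet that writes 0xDEADBEEF to it, then poll the
 * register until the value lands or the timeout expires.
 */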
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

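/*
 * IB liveness test: same idea as the ring test, but the WRITE_DATA packet is
 * submitted through an indirect buffer and targets a writeback slot in
 * system memory, so IB fetch and fence signalling are exercised as well.
 */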
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	int err;

	if (amdgpu_sriov_vf(adev))
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   "amdgpu/%s_sjt_mec.bin", chip_name);
	else
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;
	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

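/*
 * Program GRBM_GFX_INDEX to steer subsequent register accesses to one
 * SE/SH/instance of this XCC, or broadcast to all of them when 0xffffffff
 * is passed for the corresponding argument.
 */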
static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all, fetch from first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
					    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};

static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank, enum aca_smu_type type,
				      void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with xcd id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_UE, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

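/*
 * Note on the numbering below: MEC0 appears as ME1 from the CP's point of
 * view (hence ring->me = mec + 1), and each XCC gets its own slice of the
 * mec_ring0 doorbell range.
 */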
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
		  AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
	uint32_t *ptr, num_xcc, inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}
}

static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
									 ring_id,
									 xcc_id,
									 i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		if (adev->gfx.mec_fw_version >= 155) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	default:
		break;
	}
	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	gfx_v9_4_3_alloc_ip_dump(adev);

	return 0;
}

static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);

	return 0;
}

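/*
 * SH_MEM_BASES packs two 16-bit aperture bases (shared and private);
 * writing DEFAULT_SH_MEM_BASES into both halves places the compute
 * apertures at 0x6000_0000'00000000, matching the layout documented in
 * gfx_v9_4_3_xcc_init_compute_vmid() below.
 */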
#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:     0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:   0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is available since RLC v2_1,
	 * and it is needed by the gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

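/*
 * RLC safe-mode handshake: request entry by writing CMD with MESSAGE = 1,
 * then poll RLC_SAFE_MODE until the RLC clears the CMD bit to acknowledge.
 * Exit (below) only writes CMD, i.e. MESSAGE = 0.
 */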
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 1);
	udelay(50);

	/* on APUs (e.g. carrizo), the CP interrupt is enabled after CP init */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}

static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256) */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

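/*
 * Point the RLC streaming performance monitor (SPM) at a VMID. In the
 * SR-IOV one-VF case the register is accessed with the NO_KIQ variants,
 * i.e. without routing the access through the KIQ.
 */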
static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
				       unsigned vmid)
{
	u32 reg, pre_data, data;

	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
		pre_data = RREG32_NO_KIQ(reg);
	else
		pre_data = RREG32(reg);

	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
		} else {
			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
		}
	}
}

1670static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1671 {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1672 {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1673};
1674
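/*
 * Check whether a register offset belongs to the RLCG-serviced access
 * list above. Logical instance numbers are translated to device instances
 * when an IP mapping callback is installed.
 */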
static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries, int arr_size)
{
	int i, inst;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		inst = adev->ip_map.logical_to_dev_inst ?
			       adev->ip_map.logical_to_dev_inst(
				       adev, entry->hwip, entry->instance) :
			       entry->instance;
		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
		      entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_4_3_check_rlcg_range(adev, offset,
					(void *)rlcg_access_gc_9_4_3,
					ARRAY_SIZE(rlcg_access_gc_9_4_3));
}

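/*
 * Enable or halt the compute microengines of one XCC. The disable path
 * asserts all ME1/ME2 pipe resets, invalidates the MEC instruction cache
 * and halts both MEs, so the XCC's KIQ ring is marked not ready.
 */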
static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
					     bool enable, int xcc_id)
{
	if (enable) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
			 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
						    int xcc_id)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 mec_ucode_addr_offset;
	u32 mec_ucode_data_offset;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	mec_ucode_addr_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
	mec_ucode_data_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);

	/* MEC1 */
	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32(mec_ucode_data_offset,
		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}

/* KIQ functions */
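/*
 * Tell the RLC scheduler which queue is the KIQ: program the ring's
 * me/pipe/queue index into the low byte of RLC_CP_SCHEDULERS, then set
 * bit 7 to activate the entry.
 */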
static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which queue is the KIQ */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
}

static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
			mqd->cp_hqd_queue_priority =
				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
		}
	}
}

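/*
 * Populate the memory queue descriptor (MQD) for a KIQ or compute ring.
 * The MQD mirrors the CP_HQD_* register state (EOP buffer, doorbell, ring
 * base, rptr/wptr report addresses) so the CP can load the queue from
 * memory.
 */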
static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	mqd->dynamic_cu_mask_addr_lo =
		lower_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi =
		upper_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

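	/*
	 * Worked example: GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords,
	 * so EOP_SIZE = order_base_2(1024) - 1 = 9 and the programmed
	 * size is 2^(9+1) = 1024 dwords, matching the EOP buffer.
	 */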
	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
		if (amdgpu_sriov_vf(adev))
			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					    DOORBELL_MODE, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a queue/ring */
	gfx_v9_4_3_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);

	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd->cp_hqd_active = 1;

	return 0;
}

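/*
 * Program the HQD registers directly from the MQD while the target queue
 * is selected via GRBM; the caller holds srbm_mutex. Only the KIQ is set
 * up this way here; regular compute queues are mapped by the KIQ through
 * MAP_QUEUES packets (see gfx_v9_4_3_xcc_kcq_resume).
 */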
static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
					    int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
	       mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
	       mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
	       mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
		       mqd->cp_hqd_dequeue_request);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
		       mqd->cp_hqd_pq_rptr);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
		       mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
		       mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
	       mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
	       mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
	       mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
	       mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
	       mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
	       mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
		mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
	       mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(
			GC, GET_INST(GC, xcc_id),
			regCP_MEC_DOORBELL_RANGE_LOWER,
			((adev->doorbell_index.kiq +
			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
			 2) << 2);
		WREG32_SOC15(
			GC, GET_INST(GC, xcc_id),
			regCP_MEC_DOORBELL_RANGE_UPPER,
			((adev->doorbell_index.userqueue_end +
			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
			 2) << 2);
	}

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
	       mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
	       mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
	       mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
	       mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
					  int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	int j;

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {

		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);

		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}

		if (j == adev->usec_timeout) {
			DRM_DEBUG("%s dequeue request failed.\n", ring->name);

			/* Manual disable if dequeue request times out */
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
		}

		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
		      0);
	}

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	struct v9_mqd *tmp_mqd;

	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);

	/* The GPU could be in a bad state during probe if the driver triggers
	 * a reset after loading the SMU; in that case the MQD has not been
	 * initialized and needs to be re-initialized. Check
	 * mqd->cp_hqd_pq_control since this value should not be 0.
	 */
	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
		/* for the GPU_RESET case, reset the MQD to a clean status */
		if (adev->gfx.kiq[xcc_id].mqd_backup)
			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.kiq[xcc_id].mqd_backup)
			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
	}

	return 0;
}

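/*
 * Init or restore the MQD of one kernel compute queue. A clean init builds
 * the MQD from scratch and saves a backup; the restore path copies the
 * backup back and resets the ring buffer and write pointer.
 */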
static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];
	struct v9_mqd *tmp_mqd;

	/* Same as the KIQ init above: the driver needs to re-init the MQD if
	 * mqd->cp_hqd_pq_control has not been initialized before.
	 */
	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];

	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
	} else {
		/* restore MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int j;

	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
			mutex_lock(&adev->srbm_mutex);
			soc15_grbm_select(adev, ring->me,
					ring->pipe,
					ring->queue, 0, GET_INST(GC, xcc_id));
			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
			mutex_unlock(&adev->srbm_mutex);
		}
	}

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq[xcc_id].ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	return 0;
}

static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
done:
	return r;
}

static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int r, j;

	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);

		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
		if (r)
			return r;
	} else {
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
	}

	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
	if (r)
		return r;

	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
	if (r)
		return r;

	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
		ring = &adev->gfx.compute_ring
				[j + xcc_id * adev->gfx.num_compute_rings];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);

	return 0;
}

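/*
 * Bring up the compute processor on every XCC. The compute partition mode
 * is resolved first: under SR-IOV the mode queried from the host must
 * already be valid, while on bare metal an unknown mode is switched to the
 * user-requested partition mode before the per-XCC resume runs.
 */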
static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
{
	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (amdgpu_sriov_vf(adev)) {
		enum amdgpu_gfx_partition mode;

		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
						       AMDGPU_XCP_FL_NONE);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
			return -EINVAL;
		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
		num_xcp = num_xcc / num_xcc_per_xcp;
		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);

	} else {
		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
						    AMDGPU_XCP_FL_NONE) ==
		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
			r = amdgpu_xcp_switch_partition_mode(
				adev->xcp_mgr, amdgpu_user_partt_mode);
	}
	if (r)
		return r;

	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
{
	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);

	if (amdgpu_sriov_vf(adev)) {
		/* must disable polling for SRIOV when hw is finished, otherwise
		 * the CPC engine may keep fetching the WB address, which is
		 * already invalid after sw is finished, and trigger a DMAR
		 * read error on the hypervisor side.
		 */
		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
		return;
	}

	/* Use the deinitialize sequence from CAIL when unbinding the device
	 * from the driver, otherwise the KIQ hangs when binding back
	 */
	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
				adev->gfx.kiq[xcc_id].ring.pipe,
				adev->gfx.kiq[xcc_id].ring.queue, 0,
				GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
						 xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
	}

	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}

static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
				       adev->gfx.cleaner_shader_ptr);

	if (!amdgpu_sriov_vf(adev))
		gfx_v9_4_3_init_golden_registers(adev);

	gfx_v9_4_3_constants_init(adev);

	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v9_4_3_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, num_xcc;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_fini(adev, i);
	}

	return 0;
}

static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
{
	return gfx_v9_4_3_hw_fini(ip_block);
}

static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
{
	return gfx_v9_4_3_hw_init(ip_block);
}

static bool gfx_v9_4_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
					GRBM_STATUS, GUI_ACTIVE))
			return false;
	}
	return true;
}

static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v9_4_3_is_idle(adev))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (grbm_soft_reset) {
		/* stop the rlc */
		adev->gfx.rlc.funcs->stop(adev);

		/* Disable MEC parsing/prefetching */
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);

		if (grbm_soft_reset) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
			tmp |= grbm_soft_reset;
			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~grbm_soft_reset;
			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
		}

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
				   gds_base);

	/* GDS Size */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
				   gds_size);

	/* GWS */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
				   (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);
	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
	gfx_v9_4_3_set_ring_funcs(adev);
	gfx_v9_4_3_set_irq_funcs(adev);
	gfx_v9_4_3_set_gds_init(adev);
	gfx_v9_4_3_set_rlc_funcs(adev);

	/* init rlcg reg access ctrl */
	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);

	return gfx_v9_4_3_init_microcode(adev);
}

static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
	if (r)
		return r;

	if (adev->gfx.ras &&
	    adev->gfx.ras->enable_watchdog_timer)
		adev->gfx.ras->enable_watchdog_timer(adev);

	return 0;
}

static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
					    bool enable, int xcc_id)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
		return;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
		return;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void
gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);

		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);

		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
		}
	}
}

static void
gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t def, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {

		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* CGCG Hysteresis: 400us */
		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);

		data = (0x2710
			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x33450100) */
		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
	}
}

static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
						  bool enable, int xcc_id)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);

	if (enable) {
		/* FGCG */
		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);

		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
								xcc_id);
		/* === CGCG + CGLS === */
		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
								xcc_id);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
								xcc_id);
		/* === MGCG + MGLS === */
		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
								xcc_id);

		/* FGCG */
		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
	.init = gfx_v9_4_3_rlc_init,
	.resume = gfx_v9_4_3_rlc_resume,
	.stop = gfx_v9_4_3_rlc_stop,
	.reset = gfx_v9_4_3_rlc_reset,
	.start = gfx_v9_4_3_rlc_start,
	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};

static int gfx_v9_4_3_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static int gfx_v9_4_3_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		for (i = 0; i < num_xcc; i++)
			gfx_v9_4_3_xcc_update_gfx_clock_gating(
				adev, state == AMD_CG_STATE_GATE, i);
		break;
	default:
		break;
	}
	return 0;
}

static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}

static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
				adev->nbio.funcs->get_hdp_flush_req_offset(adev),
				adev->nbio.funcs->get_hdp_flush_done_offset(adev),
				ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
					    struct amdgpu_job *job,
					    struct amdgpu_ib *ib,
					    uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high probability of getting a wave ID mismatch
	 * between ME and GDS, leading to a HW deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
				(2 << 0) |
#endif
				lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

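/*
 * Emit a RELEASE_MEM fence: flush and/or invalidate the TC caches at end
 * of pipe, write the 32- or 64-bit fence sequence number and optionally
 * raise an interrupt.
 */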
static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				       u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * The address should be Qword-aligned for a 64-bit write, and
	 * Dword-aligned if only the low 32 bits of data are sent (data
	 * high is discarded).
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

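/*
 * Compute rings keep their wptr as a 64-bit value in the writeback buffer
 * and mirror it to the doorbell on update; doorbell submission is the only
 * mode supported here.
 */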
static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					   u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

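/*
 * Emit a register read on behalf of the KIQ: COPY_DATA moves the
 * (normalized) register value into the caller's writeback slot.
 */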
static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t val)
{
	uint32_t cmd = 0;

	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					  uint32_t val, uint32_t mask)
{
	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						    uint32_t reg0, uint32_t reg1,
						    uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
						   ref, mask);
}

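/*
 * Attempt soft ring recovery by issuing an SQ_CMD targeting waves that
 * match the given VMID (CMD 0x3 is assumed here to be the wave kill
 * command), inside RLC safe mode.
 */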
3065static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3066 unsigned vmid)
3067{
3068 struct amdgpu_device *adev = ring->adev;
3069 uint32_t value = 0;
3070
3071 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3072 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3073 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3074 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3075 amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3076 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3077 amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3078}
3079
3080static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3081 struct amdgpu_device *adev, int me, int pipe,
3082 enum amdgpu_interrupt_state state, int xcc_id)
3083{
3084 u32 mec_int_cntl, mec_int_cntl_reg;
3085
3086 /*
3087 * amdgpu controls only the first MEC. That's why this function only
3088 * handles the setting of interrupts for this specific MEC. All other
3089 * pipes' interrupts are set by amdkfd.
3090 */
3091
3092 if (me == 1) {
3093 switch (pipe) {
3094 case 0:
3095 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3096 break;
3097 case 1:
3098 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3099 break;
3100 case 2:
3101 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3102 break;
3103 case 3:
3104 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3105 break;
3106 default:
3107 DRM_DEBUG("invalid pipe %d\n", pipe);
3108 return;
3109 }
3110 } else {
3111 DRM_DEBUG("invalid me %d\n", me);
3112 return;
3113 }
3114
3115 switch (state) {
3116 case AMDGPU_IRQ_STATE_DISABLE:
3117 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3118 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3119 TIME_STAMP_INT_ENABLE, 0);
3120 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3121 break;
3122 case AMDGPU_IRQ_STATE_ENABLE:
3123 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3124 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3125 TIME_STAMP_INT_ENABLE, 1);
3126 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3127 break;
3128 default:
3129 break;
3130 }
3131}
3132
3133static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3134 int xcc_id, int me, int pipe)
3135{
3136 /*
3137 * amdgpu controls only the first MEC. That's why this function only
3138 * handles the setting of interrupts for this specific MEC. All other
3139 * pipes' interrupts are set by amdkfd.
3140 */
3141 if (me != 1)
3142 return 0;
3143
3144 switch (pipe) {
3145 case 0:
3146 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3147 case 1:
3148 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3149 case 2:
3150 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3151 case 3:
3152 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3153 default:
3154 return 0;
3155 }
3156}
3157
3158static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3159 struct amdgpu_irq_src *source,
3160 unsigned type,
3161 enum amdgpu_interrupt_state state)
3162{
3163 u32 mec_int_cntl_reg, mec_int_cntl;
3164 int i, j, k, num_xcc;
3165
3166 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3167 switch (state) {
3168 case AMDGPU_IRQ_STATE_DISABLE:
3169 case AMDGPU_IRQ_STATE_ENABLE:
3170 for (i = 0; i < num_xcc; i++) {
3171 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3172 PRIV_REG_INT_ENABLE,
3173 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3174 for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3175 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3176 /* MECs start at 1 */
3177 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3178
3179 if (mec_int_cntl_reg) {
3180 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3181 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3182 PRIV_REG_INT_ENABLE,
3183 state == AMDGPU_IRQ_STATE_ENABLE ?
3184 1 : 0);
3185 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3186 }
3187 }
3188 }
3189 }
3190 break;
3191 default:
3192 break;
3193 }
3194
3195 return 0;
3196}
3197
3198static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3199 struct amdgpu_irq_src *source,
3200 unsigned type,
3201 enum amdgpu_interrupt_state state)
3202{
3203 u32 mec_int_cntl_reg, mec_int_cntl;
3204 int i, j, k, num_xcc;
3205
3206 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3207 switch (state) {
3208 case AMDGPU_IRQ_STATE_DISABLE:
3209 case AMDGPU_IRQ_STATE_ENABLE:
3210 for (i = 0; i < num_xcc; i++) {
3211 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3212 OPCODE_ERROR_INT_ENABLE,
3213 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3214 for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3215 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3216 /* MECs start at 1 */
3217 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3218
3219 if (mec_int_cntl_reg) {
3220 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3221 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3222 OPCODE_ERROR_INT_ENABLE,
3223 state == AMDGPU_IRQ_STATE_ENABLE ?
3224 1 : 0);
3225 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3226 }
3227 }
3228 }
3229 }
3230 break;
3231 default:
3232 break;
3233 }
3234
3235 return 0;
3236}
3237
3238static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3239 struct amdgpu_irq_src *source,
3240 unsigned type,
3241 enum amdgpu_interrupt_state state)
3242{
3243 int i, num_xcc;
3244
3245 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3246 switch (state) {
3247 case AMDGPU_IRQ_STATE_DISABLE:
3248 case AMDGPU_IRQ_STATE_ENABLE:
3249 for (i = 0; i < num_xcc; i++)
3250 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3251 PRIV_INSTR_INT_ENABLE,
3252 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3253 break;
3254 default:
3255 break;
3256 }
3257
3258 return 0;
3259}
3260
3261static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3262 struct amdgpu_irq_src *src,
3263 unsigned type,
3264 enum amdgpu_interrupt_state state)
3265{
3266 int i, num_xcc;
3267
3268 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3269 for (i = 0; i < num_xcc; i++) {
3270 switch (type) {
3271 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3272 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3273 adev, 1, 0, state, i);
3274 break;
3275 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3276 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3277 adev, 1, 1, state, i);
3278 break;
3279 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3280 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3281 adev, 1, 2, state, i);
3282 break;
3283 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3284 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3285 adev, 1, 3, state, i);
3286 break;
3287 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3288 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3289 adev, 2, 0, state, i);
3290 break;
3291 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3292 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3293 adev, 2, 1, state, i);
3294 break;
3295 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3296 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3297 adev, 2, 2, state, i);
3298 break;
3299 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3300 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3301 adev, 2, 3, state, i);
3302 break;
3303 default:
3304 break;
3305 }
3306 }
3307
3308 return 0;
3309}
3310
static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	int i, xcc_id;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);

	if (xcc_id == -EINVAL)
		return -EINVAL;

	switch (me_id) {
	case 0:
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

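/*
 * Common handler for CP fault interrupts: locate the compute ring that
 * triggered the fault and kick its GPU scheduler fault handling.
 */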
static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
			     struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i, xcc_id;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);

	if (xcc_id == -EINVAL)
		return;

	switch (me_id) {
	case 0:
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v9_4_3_fault(adev, entry);
	return 0;
}

static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal opcode in command stream\n");
	gfx_v9_4_3_fault(adev, entry);
	return 0;
}

static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v9_4_3_fault(adev, entry);
	return 0;
}

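/*
 * Emit an ACQUIRE_MEM packet that invalidates the shader instruction
 * and scalar caches and invalidates/writes back the TC/TCL1 caches
 * over the full address range.
 */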
static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int cp_coher_cntl =
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);    /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff);      /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);    /* POLL_INTERVAL */
}

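/*
 * Program the per-pipe SPI_WCL_PIPE_PERCENT_CS register to throttle
 * compute waves on the given pipe: 0x1 when limiting is enabled, the
 * 0x7f default when it is not.
 */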
static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
					  uint32_t pipe, bool enable)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t val;
	uint32_t wcl_cs_reg;

	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
	val = enable ? 0x1 : 0x7f;

	switch (pipe) {
	case 0:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
		break;
	case 1:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
		break;
	case 2:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
		break;
	case 3:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
		break;
	default:
		DRM_DEBUG("invalid pipe %d\n", pipe);
		return;
	}

	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}

static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t val;
	int i;

	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
	 * limit the number of gfx waves. Setting only 5 bits (0x1f) makes
	 * sure gfx gets only around 25% of the GPU resources.
	 */
	val = enable ? 0x1f : 0x07ffffff;
	amdgpu_ring_emit_wreg(ring,
			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
			      val);

	/* Restrict waves for normal/low priority compute queues as well
	 * to get the best QoS for high priority compute jobs.
	 *
	 * amdgpu controls only the 1st ME (CS pipes 0-3).
	 */
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		if (i != ring->pipe)
			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
	}
}

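/*
 * Poll CP_HQD_ACTIVE for the selected me/pipe/queue under srbm_mutex
 * until the queue has finished dequeueing, or return -ETIMEDOUT after
 * adev->usec_timeout microseconds.
 */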
static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
				 uint32_t pipe, uint32_t queue,
				 uint32_t xcc_id)
{
	int i, r;
	/* make sure dequeue is complete */
	gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	else
		r = 0;
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
	gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);

	return r;
}

static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
	/* TODO: Check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
	    adev->gfx.mec_fw_version >= 0x0000009b)
		return true;

	dev_warn_once(adev->dev, "Please use the latest MEC version to check whether pipe reset is supported\n");

	return false;
}

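/*
 * Reset the compute pipe backing @ring by pulsing the matching
 * MEC_MEx_PIPEy_RESET bit in CP_MEC_CNTL, then wait for the HQD to
 * deactivate.
 */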
static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reset_pipe, clean_pipe;
	int r;

	if (!gfx_v9_4_3_pipe_reset_support(adev))
		return -EINVAL;

	gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
	mutex_lock(&adev->srbm_mutex);

	reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
	clean_pipe = reset_pipe;

	if (ring->me == 1) {
		switch (ring->pipe) {
		case 0:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
						   MEC_ME1_PIPE0_RESET, 1);
			break;
		case 1:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
						   MEC_ME1_PIPE1_RESET, 1);
			break;
		case 2:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
						   MEC_ME1_PIPE2_RESET, 1);
			break;
		case 3:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
						   MEC_ME1_PIPE3_RESET, 1);
			break;
		default:
			break;
		}
	} else {
		if (ring->pipe)
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
						   MEC_ME2_PIPE1_RESET, 1);
		else
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
						   MEC_ME2_PIPE0_RESET, 1);
	}

	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
	mutex_unlock(&adev->srbm_mutex);
	gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);

	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
	return r;
}

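/*
 * Per-queue reset: ask the KIQ to unmap the queue, fall back to a hw
 * pipe reset if the unmap does not complete, reinitialize the MQD and
 * remap the queue, then test that the ring is alive again.
 */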
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
				unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;
	int r;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
				   0, 0);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_ring_test_ring(kiq_ring);
	if (r) {
		dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
			ring->name);
		goto pipe_reset;
	}

	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
	if (r)
		dev_err(adev->dev, "failed to wait for hqd deactivation, will try pipe reset\n");

pipe_reset:
	if (r) {
		r = gfx_v9_4_3_reset_hw_pipe(ring);
		dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name,
			 r ? "failed" : "succeeded");
		if (r)
			return r;
	}

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0)) {
		dev_err(adev->dev, "failed to reserve mqd_obj\n");
		return r;
	}
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r) {
		dev_err(adev->dev, "failed to map and reinit the kcq mqd\n");
		return r;
	}
	spin_lock_irqsave(&kiq->ring_lock, flags);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
	if (r) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}
	kiq->pmf->kiq_map_queues(kiq_ring, ring);
	amdgpu_ring_commit(kiq_ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_ring_test_ring(kiq_ring);
	if (r) {
		dev_err(adev->dev, "failed to remap queue\n");
		return r;
	}
	return amdgpu_ring_test_ring(ring);
}

enum amdgpu_gfx_cp_ras_mem_id {
	AMDGPU_GFX_CP_MEM1 = 1,
	AMDGPU_GFX_CP_MEM2,
	AMDGPU_GFX_CP_MEM3,
	AMDGPU_GFX_CP_MEM4,
	AMDGPU_GFX_CP_MEM5,
};

enum amdgpu_gfx_gcea_ras_mem_id {
	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
	AMDGPU_GFX_GCEA_IORD_CMDMEM,
	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
	AMDGPU_GFX_GCEA_MAM_DMEM0,
	AMDGPU_GFX_GCEA_MAM_DMEM1,
	AMDGPU_GFX_GCEA_MAM_DMEM2,
	AMDGPU_GFX_GCEA_MAM_DMEM3,
	AMDGPU_GFX_GCEA_MAM_AMEM0,
	AMDGPU_GFX_GCEA_MAM_AMEM1,
	AMDGPU_GFX_GCEA_MAM_AMEM2,
	AMDGPU_GFX_GCEA_MAM_AMEM3,
	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
	AMDGPU_GFX_GCEA_WRET_TAGMEM,
	AMDGPU_GFX_GCEA_RRET_TAGMEM,
	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
};

enum amdgpu_gfx_gc_cane_ras_mem_id {
	AMDGPU_GFX_GC_CANE_MEM0 = 0,
};

enum amdgpu_gfx_gcutcl2_ras_mem_id {
	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
};

enum amdgpu_gfx_gds_ras_mem_id {
	AMDGPU_GFX_GDS_MEM0 = 0,
};

enum amdgpu_gfx_lds_ras_mem_id {
	AMDGPU_GFX_LDS_BANK0 = 0,
	AMDGPU_GFX_LDS_BANK1,
	AMDGPU_GFX_LDS_BANK2,
	AMDGPU_GFX_LDS_BANK3,
	AMDGPU_GFX_LDS_BANK4,
	AMDGPU_GFX_LDS_BANK5,
	AMDGPU_GFX_LDS_BANK6,
	AMDGPU_GFX_LDS_BANK7,
	AMDGPU_GFX_LDS_BANK8,
	AMDGPU_GFX_LDS_BANK9,
	AMDGPU_GFX_LDS_BANK10,
	AMDGPU_GFX_LDS_BANK11,
	AMDGPU_GFX_LDS_BANK12,
	AMDGPU_GFX_LDS_BANK13,
	AMDGPU_GFX_LDS_BANK14,
	AMDGPU_GFX_LDS_BANK15,
	AMDGPU_GFX_LDS_BANK16,
	AMDGPU_GFX_LDS_BANK17,
	AMDGPU_GFX_LDS_BANK18,
	AMDGPU_GFX_LDS_BANK19,
	AMDGPU_GFX_LDS_BANK20,
	AMDGPU_GFX_LDS_BANK21,
	AMDGPU_GFX_LDS_BANK22,
	AMDGPU_GFX_LDS_BANK23,
	AMDGPU_GFX_LDS_BANK24,
	AMDGPU_GFX_LDS_BANK25,
	AMDGPU_GFX_LDS_BANK26,
	AMDGPU_GFX_LDS_BANK27,
	AMDGPU_GFX_LDS_BANK28,
	AMDGPU_GFX_LDS_BANK29,
	AMDGPU_GFX_LDS_BANK30,
	AMDGPU_GFX_LDS_BANK31,
	AMDGPU_GFX_LDS_SP_BUFFER_A,
	AMDGPU_GFX_LDS_SP_BUFFER_B,
};

enum amdgpu_gfx_rlc_ras_mem_id {
	AMDGPU_GFX_RLC_GPMF32 = 1,
	AMDGPU_GFX_RLC_RLCVF32,
	AMDGPU_GFX_RLC_SCRATCH,
	AMDGPU_GFX_RLC_SRM_ARAM,
	AMDGPU_GFX_RLC_SRM_DRAM,
	AMDGPU_GFX_RLC_TCTAG,
	AMDGPU_GFX_RLC_SPM_SE,
	AMDGPU_GFX_RLC_SPM_GRBMT,
};

enum amdgpu_gfx_sp_ras_mem_id {
	AMDGPU_GFX_SP_SIMDID0 = 0,
};

enum amdgpu_gfx_spi_ras_mem_id {
	AMDGPU_GFX_SPI_MEM0 = 0,
	AMDGPU_GFX_SPI_MEM1,
	AMDGPU_GFX_SPI_MEM2,
	AMDGPU_GFX_SPI_MEM3,
};

enum amdgpu_gfx_sqc_ras_mem_id {
	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
};

enum amdgpu_gfx_sq_ras_mem_id {
	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
	AMDGPU_GFX_SQ_SGPR_MEM1,
	AMDGPU_GFX_SQ_SGPR_MEM2,
	AMDGPU_GFX_SQ_SGPR_MEM3,
};

enum amdgpu_gfx_ta_ras_mem_id {
	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
	AMDGPU_GFX_TA_FS_CFIFO_RAM,
	AMDGPU_GFX_TA_FSX_LFIFO,
	AMDGPU_GFX_TA_FS_DFIFO_RAM,
};

enum amdgpu_gfx_tcc_ras_mem_id {
	AMDGPU_GFX_TCC_MEM1 = 1,
};

enum amdgpu_gfx_tca_ras_mem_id {
	AMDGPU_GFX_TCA_MEM1 = 1,
};

enum amdgpu_gfx_tci_ras_mem_id {
	AMDGPU_GFX_TCIW_MEM = 1,
};

enum amdgpu_gfx_tcp_ras_mem_id {
	AMDGPU_GFX_TCP_LFIFO0 = 1,
	AMDGPU_GFX_TCP_SET0BANK0_RAM,
	AMDGPU_GFX_TCP_SET0BANK1_RAM,
	AMDGPU_GFX_TCP_SET0BANK2_RAM,
	AMDGPU_GFX_TCP_SET0BANK3_RAM,
	AMDGPU_GFX_TCP_SET1BANK0_RAM,
	AMDGPU_GFX_TCP_SET1BANK1_RAM,
	AMDGPU_GFX_TCP_SET1BANK2_RAM,
	AMDGPU_GFX_TCP_SET1BANK3_RAM,
	AMDGPU_GFX_TCP_SET2BANK0_RAM,
	AMDGPU_GFX_TCP_SET2BANK1_RAM,
	AMDGPU_GFX_TCP_SET2BANK2_RAM,
	AMDGPU_GFX_TCP_SET2BANK3_RAM,
	AMDGPU_GFX_TCP_SET3BANK0_RAM,
	AMDGPU_GFX_TCP_SET3BANK1_RAM,
	AMDGPU_GFX_TCP_SET3BANK2_RAM,
	AMDGPU_GFX_TCP_SET3BANK3_RAM,
	AMDGPU_GFX_TCP_VM_FIFO,
	AMDGPU_GFX_TCP_DB_TAGRAM0,
	AMDGPU_GFX_TCP_DB_TAGRAM1,
	AMDGPU_GFX_TCP_DB_TAGRAM2,
	AMDGPU_GFX_TCP_DB_TAGRAM3,
	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
	AMDGPU_GFX_TCP_CMD_FIFO,
};

enum amdgpu_gfx_td_ras_mem_id {
	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
};

enum amdgpu_gfx_tcx_ras_mem_id {
	AMDGPU_GFX_TCX_FIFOD0 = 0,
	AMDGPU_GFX_TCX_FIFOD1,
	AMDGPU_GFX_TCX_FIFOD2,
	AMDGPU_GFX_TCX_FIFOD3,
	AMDGPU_GFX_TCX_FIFOD4,
	AMDGPU_GFX_TCX_FIFOD5,
	AMDGPU_GFX_TCX_FIFOD6,
	AMDGPU_GFX_TCX_FIFOD7,
	AMDGPU_GFX_TCX_FIFOB0,
	AMDGPU_GFX_TCX_FIFOB1,
	AMDGPU_GFX_TCX_FIFOB2,
	AMDGPU_GFX_TCX_FIFOB3,
	AMDGPU_GFX_TCX_FIFOB4,
	AMDGPU_GFX_TCX_FIFOB5,
	AMDGPU_GFX_TCX_FIFOB6,
	AMDGPU_GFX_TCX_FIFOB7,
	AMDGPU_GFX_TCX_FIFOA0,
	AMDGPU_GFX_TCX_FIFOA1,
	AMDGPU_GFX_TCX_FIFOA2,
	AMDGPU_GFX_TCX_FIFOA3,
	AMDGPU_GFX_TCX_FIFOA4,
	AMDGPU_GFX_TCX_FIFOA5,
	AMDGPU_GFX_TCX_FIFOA6,
	AMDGPU_GFX_TCX_FIFOA7,
	AMDGPU_GFX_TCX_CFIFO0,
	AMDGPU_GFX_TCX_CFIFO1,
	AMDGPU_GFX_TCX_CFIFO2,
	AMDGPU_GFX_TCX_CFIFO3,
	AMDGPU_GFX_TCX_CFIFO4,
	AMDGPU_GFX_TCX_CFIFO5,
	AMDGPU_GFX_TCX_CFIFO6,
	AMDGPU_GFX_TCX_CFIFO7,
	AMDGPU_GFX_TCX_FIFO_ACKB0,
	AMDGPU_GFX_TCX_FIFO_ACKB1,
	AMDGPU_GFX_TCX_FIFO_ACKB2,
	AMDGPU_GFX_TCX_FIFO_ACKB3,
	AMDGPU_GFX_TCX_FIFO_ACKB4,
	AMDGPU_GFX_TCX_FIFO_ACKB5,
	AMDGPU_GFX_TCX_FIFO_ACKB6,
	AMDGPU_GFX_TCX_FIFO_ACKB7,
	AMDGPU_GFX_TCX_FIFO_ACKD0,
	AMDGPU_GFX_TCX_FIFO_ACKD1,
	AMDGPU_GFX_TCX_FIFO_ACKD2,
	AMDGPU_GFX_TCX_FIFO_ACKD3,
	AMDGPU_GFX_TCX_FIFO_ACKD4,
	AMDGPU_GFX_TCX_FIFO_ACKD5,
	AMDGPU_GFX_TCX_FIFO_ACKD6,
	AMDGPU_GFX_TCX_FIFO_ACKD7,
	AMDGPU_GFX_TCX_DST_FIFOA0,
	AMDGPU_GFX_TCX_DST_FIFOA1,
	AMDGPU_GFX_TCX_DST_FIFOA2,
	AMDGPU_GFX_TCX_DST_FIFOA3,
	AMDGPU_GFX_TCX_DST_FIFOA4,
	AMDGPU_GFX_TCX_DST_FIFOA5,
	AMDGPU_GFX_TCX_DST_FIFOA6,
	AMDGPU_GFX_TCX_DST_FIFOA7,
	AMDGPU_GFX_TCX_DST_FIFOB0,
	AMDGPU_GFX_TCX_DST_FIFOB1,
	AMDGPU_GFX_TCX_DST_FIFOB2,
	AMDGPU_GFX_TCX_DST_FIFOB3,
	AMDGPU_GFX_TCX_DST_FIFOB4,
	AMDGPU_GFX_TCX_DST_FIFOB5,
	AMDGPU_GFX_TCX_DST_FIFOB6,
	AMDGPU_GFX_TCX_DST_FIFOB7,
	AMDGPU_GFX_TCX_DST_FIFOD0,
	AMDGPU_GFX_TCX_DST_FIFOD1,
	AMDGPU_GFX_TCX_DST_FIFOD2,
	AMDGPU_GFX_TCX_DST_FIFOD3,
	AMDGPU_GFX_TCX_DST_FIFOD4,
	AMDGPU_GFX_TCX_DST_FIFOD5,
	AMDGPU_GFX_TCX_DST_FIFOD6,
	AMDGPU_GFX_TCX_DST_FIFOD7,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
};

enum amdgpu_gfx_atc_l2_ras_mem_id {
	AMDGPU_GFX_ATC_L2_MEM0 = 0,
};

enum amdgpu_gfx_utcl2_ras_mem_id {
	AMDGPU_GFX_UTCL2_MEM0 = 0,
};

enum amdgpu_gfx_vml2_ras_mem_id {
	AMDGPU_GFX_VML2_MEM0 = 0,
};

enum amdgpu_gfx_vml2_walker_ras_mem_id {
	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
};

/* The following four lists reference the MEM0 ids defined by the
 * per-block enums above (the un-suffixed names are not defined there).
 */
static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
};

static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
};

static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
	    AMDGPU_GFX_RLC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
	    AMDGPU_GFX_GDS_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
	    AMDGPU_GFX_GC_CANE_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
	    AMDGPU_GFX_SPI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
	    AMDGPU_GFX_SQ_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
	    AMDGPU_GFX_SQC_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
	    AMDGPU_GFX_TCX_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
	    AMDGPU_GFX_TCC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
	    AMDGPU_GFX_TA_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
	    AMDGPU_GFX_TCI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
	    AMDGPU_GFX_TCP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
	    AMDGPU_GFX_TD_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
	    AMDGPU_GFX_GCEA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
	    AMDGPU_GFX_LDS_MEM, 4},
};

static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
	    AMDGPU_GFX_RLC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
	    AMDGPU_GFX_GDS_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
	    AMDGPU_GFX_GC_CANE_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
	    AMDGPU_GFX_SPI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
	    AMDGPU_GFX_SQ_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
	    AMDGPU_GFX_SQC_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
	    AMDGPU_GFX_TCX_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
	    AMDGPU_GFX_TCC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
	    AMDGPU_GFX_TA_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
	    AMDGPU_GFX_TCI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
	    AMDGPU_GFX_TCP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
	    AMDGPU_GFX_TD_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
	    AMDGPU_GFX_TCA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
	    AMDGPU_GFX_GCEA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
	    AMDGPU_GFX_LDS_MEM, 4},
};

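/*
 * Walk the CE and UE register lists for one XCC instance, selecting
 * each SE/register instance as needed, and accumulate the correctable
 * and uncorrectable error counts into the per-die RAS statistics.
 */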
static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	unsigned long ce_count = 0, ue_count = 0;
	uint32_t i, j, k;

	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = xcc_id & 0x01 ? 1 : 0,
	};

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}

	/* handle the extra register entries of UE */
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}

	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* the caller should make sure err_data->ue_count and
	 * err_data->ce_count are initialized before this call
	 */
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
}

static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i, j, k;

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	/* handle the extra register entries of UE */
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

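/*
 * Program the SQ watchdog on every SE of this XCC: apply the module
 * parameter that disables the fatal timeout and clamp an out-of-range
 * timeout period to the 0x23 maximum.
 */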
static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i;
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);

	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
	    (amdgpu_watchdog_timer.period < 1 ||
	     amdgpu_watchdog_timer.period > 0x23)) {
		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
		amdgpu_watchdog_timer.period = 0x23;
	}
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
			     amdgpu_watchdog_timer.period);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  gfx_v9_4_3_inst_query_ras_err_count);
}

static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
}

static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}

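/*
 * Pad the ring with @num_nop NOP dwords: one NOP packet header whose
 * count field covers up to 0x3ffe following dwords, then plain NOP
 * dwords for the remainder.
 */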
static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization till 0x3ffe, followed by the remaining NOPs one at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nop - 1 NOP packets */
	amdgpu_ring_insert_nop(ring, num_nop - 1);
}

static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k;
	uint32_t xcc_id, xcc_offset, inst_offset;
	uint32_t num_xcc, reg, num_inst;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);

	if (!adev->gfx.ip_dump_core)
		return;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	drm_printf(p, "Number of Instances:%d\n", num_xcc);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count;
		drm_printf(p, "\nInstance id:%d\n", xcc_id);
		for (i = 0; i < reg_count; i++)
			drm_printf(p, "%-50s \t 0x%08x\n",
				   gc_reg_list_9_4_3[i].reg_name,
				   adev->gfx.ip_dump_core[xcc_offset + i]);
	}

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
		   num_xcc,
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count * num_inst;
		inst_offset = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
					drm_printf(p,
						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
						   xcc_id, i, j, k);
					for (reg = 0; reg < reg_count; reg++) {
						drm_printf(p,
							   "%-50s \t 0x%08x\n",
							   gc_cp_reg_list_9_4_3[reg].reg_name,
							   adev->gfx.ip_dump_compute_queues
							   [xcc_offset + inst_offset + reg]);
					}
					inst_offset += reg_count;
				}
			}
		}
	}
}

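/*
 * Snapshot the core and per-queue register state of every XCC into the
 * ip_dump buffers, with GFXOFF disabled while the registers are read.
 */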
4649static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
4650{
4651 struct amdgpu_device *adev = ip_block->adev;
4652 uint32_t i, j, k;
4653 uint32_t num_xcc, reg, num_inst;
4654 uint32_t xcc_id, xcc_offset, inst_offset;
4655 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4656
4657 if (!adev->gfx.ip_dump_core)
4658 return;
4659
4660 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4661
4662 amdgpu_gfx_off_ctrl(adev, false);
4663 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4664 xcc_offset = xcc_id * reg_count;
4665 for (i = 0; i < reg_count; i++)
4666 adev->gfx.ip_dump_core[xcc_offset + i] =
4667 RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
4668 GET_INST(GC, xcc_id)));
4669 }
4670 amdgpu_gfx_off_ctrl(adev, true);
4671
4672 /* dump compute queue registers for all instances */
4673 if (!adev->gfx.ip_dump_compute_queues)
4674 return;
4675
4676 num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4677 adev->gfx.mec.num_queue_per_pipe;
4678 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4679 amdgpu_gfx_off_ctrl(adev, false);
4680 mutex_lock(&adev->srbm_mutex);
4681 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4682 xcc_offset = xcc_id * reg_count * num_inst;
4683 inst_offset = 0;
4684 for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4685 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4686 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4687 /* ME0 is for GFX so start from 1 for CP */
4688 soc15_grbm_select(adev, 1 + i, j, k, 0,
4689 GET_INST(GC, xcc_id));
4690
4691 for (reg = 0; reg < reg_count; reg++) {
4692 adev->gfx.ip_dump_compute_queues
4693 [xcc_offset +
4694 inst_offset + reg] =
4695 RREG32(SOC15_REG_ENTRY_OFFSET_INST(
4696 gc_cp_reg_list_9_4_3[reg],
4697 GET_INST(GC, xcc_id)));
4698 }
4699 inst_offset += reg_count;
4700 }
4701 }
4702 }
4703 }
4704 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4705 mutex_unlock(&adev->srbm_mutex);
4706 amdgpu_gfx_off_ctrl(adev, true);
4707}
4708
4709static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
4710{
4711 /* Emit the cleaner shader */
4712 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
4713 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
4714}
4715
4716static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4717 .name = "gfx_v9_4_3",
4718 .early_init = gfx_v9_4_3_early_init,
4719 .late_init = gfx_v9_4_3_late_init,
4720 .sw_init = gfx_v9_4_3_sw_init,
4721 .sw_fini = gfx_v9_4_3_sw_fini,
4722 .hw_init = gfx_v9_4_3_hw_init,
4723 .hw_fini = gfx_v9_4_3_hw_fini,
4724 .suspend = gfx_v9_4_3_suspend,
4725 .resume = gfx_v9_4_3_resume,
4726 .is_idle = gfx_v9_4_3_is_idle,
4727 .wait_for_idle = gfx_v9_4_3_wait_for_idle,
4728 .soft_reset = gfx_v9_4_3_soft_reset,
4729 .set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4730 .set_powergating_state = gfx_v9_4_3_set_powergating_state,
4731 .get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4732 .dump_ip_state = gfx_v9_4_3_ip_dump,
4733 .print_ip_state = gfx_v9_4_3_ip_print,
4734};
4735
4736static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4737 .type = AMDGPU_RING_TYPE_COMPUTE,
4738 .align_mask = 0xff,
4739 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4740 .support_64bit_ptrs = true,
4741 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4742 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4743 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4744 .emit_frame_size =
4745 20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4746 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4747 5 + /* hdp invalidate */
4748 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4749 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4750 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4751 2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4752 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4753 7 + /* gfx_v9_4_3_emit_mem_sync */
4754 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4755 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4756 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
4757 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
4758 .emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4759 .emit_fence = gfx_v9_4_3_ring_emit_fence,
4760 .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4761 .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4762 .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4763 .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4764 .test_ring = gfx_v9_4_3_ring_test_ring,
4765 .test_ib = gfx_v9_4_3_ring_test_ib,
4766 .insert_nop = gfx_v9_4_3_ring_insert_nop,
4767 .pad_ib = amdgpu_ring_generic_pad_ib,
4768 .emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4769 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4770 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4771 .soft_recovery = gfx_v9_4_3_ring_soft_recovery,
4772 .emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4773 .emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4774 .reset = gfx_v9_4_3_reset_kcq,
4775 .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
4776 .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
4777 .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
4778};
4779
4780static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4781 .type = AMDGPU_RING_TYPE_KIQ,
4782 .align_mask = 0xff,
4783 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4784 .support_64bit_ptrs = true,
4785 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4786 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4787 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4788 .emit_frame_size =
4789 20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4790 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4791 5 + /* hdp invalidate */
4792 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4793 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4794 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4795 2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4796 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4797 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
4798 .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4799 .test_ring = gfx_v9_4_3_ring_test_ring,
4800 .insert_nop = amdgpu_ring_insert_nop,
4801 .pad_ib = amdgpu_ring_generic_pad_ib,
4802 .emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4803 .emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4804 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4805 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4806};
4807
4808static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4809{
4810 int i, j, num_xcc;
4811
4812 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4813 for (i = 0; i < num_xcc; i++) {
4814 adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4815
4816 for (j = 0; j < adev->gfx.num_compute_rings; j++)
4817 adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4818 = &gfx_v9_4_3_ring_funcs_compute;
4819 }
4820}
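
/*
 * A worked example of the flat indexing above, assuming a hypothetical
 * part with 8 compute rings per XCC: ring j = 2 on XCC i = 1 lives at
 * compute_ring[2 + 1 * 8] = compute_ring[10], i.e. the ring array is
 * laid out XCC-major with each XCC's rings contiguous.
 */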
4821
4822static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4823 .set = gfx_v9_4_3_set_eop_interrupt_state,
4824 .process = gfx_v9_4_3_eop_irq,
4825};
4826
4827static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4828 .set = gfx_v9_4_3_set_priv_reg_fault_state,
4829 .process = gfx_v9_4_3_priv_reg_irq,
4830};
4831
4832static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4833 .set = gfx_v9_4_3_set_bad_op_fault_state,
4834 .process = gfx_v9_4_3_bad_op_irq,
4835};
4836
4837static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4838 .set = gfx_v9_4_3_set_priv_inst_fault_state,
4839 .process = gfx_v9_4_3_priv_inst_irq,
4840};
4841
4842static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4843{
4844 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4845 adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4846
4847 adev->gfx.priv_reg_irq.num_types = 1;
4848 adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4849
4850 adev->gfx.bad_op_irq.num_types = 1;
4851 adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4852
4853 adev->gfx.priv_inst_irq.num_types = 1;
4854 adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4855}
4856
4857static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4858{
4859 adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4860}
4861
4862
4863static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4864{
4865	/* init ASIC gds info */
4866 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4867 case IP_VERSION(9, 4, 3):
4868 case IP_VERSION(9, 4, 4):
4869		/* 9.4.3 removed all of the GDS internal memory;
4870		 * only GWS opcodes, e.g. barrier and semaphore,
4871		 * are supported in the kernel */
4872 adev->gds.gds_size = 0;
4873 break;
4874 default:
4875 adev->gds.gds_size = 0x10000;
4876 break;
4877 }
4878
4879 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4880 case IP_VERSION(9, 4, 3):
4881 case IP_VERSION(9, 4, 4):
4882 /* deprecated for 9.4.3, no usage at all */
4883 adev->gds.gds_compute_max_wave_id = 0;
4884 break;
4885 default:
4886 /* this really depends on the chip */
4887 adev->gds.gds_compute_max_wave_id = 0x7ff;
4888 break;
4889 }
4890
4891 adev->gds.gws_size = 64;
4892 adev->gds.oa_size = 16;
4893}
4894
4895static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4896 u32 bitmap, int xcc_id)
4897{
4898 u32 data;
4899
4900 if (!bitmap)
4901 return;
4902
4903 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4904 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4905
4906 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4907}
4908
4909static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4910{
4911 u32 data, mask;
4912
4913 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4914 data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4915
4916 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4917 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4918
4919 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4920
4921 return (~data) & mask;
4922}
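
/*
 * A worked example of the bitmap math above, assuming a hypothetical
 * harvest configuration with max_cu_per_sh = 8 and CUs 5..7 fused off:
 * the combined INACTIVE_CUS field reads 0xE0,
 * amdgpu_gfx_create_bitmask(8) yields 0xFF, and the function returns
 * (~0xE0) & 0xFF = 0x1F, i.e. CUs 0..4 are active.
 */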
4923
4924static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4925 struct amdgpu_cu_info *cu_info)
4926{
4927 int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4928 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
4929 unsigned disable_masks[4 * 4];
4930 bool is_symmetric_cus;
4931
4932 if (!adev || !cu_info)
4933 return -EINVAL;
4934
4935 /*
4936	 * 16 comes from the bitmap array size 4*4, which covers all gfx9 ASICs
4937 */
4938 if (adev->gfx.config.max_shader_engines *
4939 adev->gfx.config.max_sh_per_se > 16)
4940 return -EINVAL;
4941
4942 amdgpu_gfx_parse_disable_cu(disable_masks,
4943 adev->gfx.config.max_shader_engines,
4944 adev->gfx.config.max_sh_per_se);
4945
4946 mutex_lock(&adev->grbm_idx_mutex);
4947 for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4948 is_symmetric_cus = true;
4949 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4950 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4951 mask = 1;
4952 ao_bitmap = 0;
4953 counter = 0;
4954 gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4955 gfx_v9_4_3_set_user_cu_inactive_bitmap(
4956 adev,
4957 disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4958 xcc_id);
4959 bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4960
4961 cu_info->bitmap[xcc_id][i][j] = bitmap;
4962
4963 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4964 if (bitmap & mask) {
4965 if (counter < adev->gfx.config.max_cu_per_sh)
4966 ao_bitmap |= mask;
4967 counter++;
4968 }
4969 mask <<= 1;
4970 }
4971 active_cu_number += counter;
4972 if (i < 2 && j < 2)
4973 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4974 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4975 }
4976 if (i && is_symmetric_cus && prev_counter != counter)
4977 is_symmetric_cus = false;
4978 prev_counter = counter;
4979 }
4980 if (is_symmetric_cus) {
4981 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
4982 tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
4983 tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
4984 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
4985 }
4986 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4987 xcc_id);
4988 }
4989 mutex_unlock(&adev->grbm_idx_mutex);
4990
4991 cu_info->number = active_cu_number;
4992 cu_info->ao_cu_mask = ao_cu_mask;
4993 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4994
4995 return 0;
4996}
4997
4998const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4999 .type = AMD_IP_BLOCK_TYPE_GFX,
5000 .major = 9,
5001 .minor = 4,
5002 .rev = 3,
5003 .funcs = &gfx_v9_4_3_ip_funcs,
5004};
5005
5006static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
5007{
5008 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5009 uint32_t tmp_mask;
5010 int i, r;
5011
5012	/* TODO: Initialize golden regs */
5013 /* gfx_v9_4_3_init_golden_registers(adev); */
5014
5015 tmp_mask = inst_mask;
5016 for_each_inst(i, tmp_mask)
5017 gfx_v9_4_3_xcc_constants_init(adev, i);
5018
5019 if (!amdgpu_sriov_vf(adev)) {
5020 tmp_mask = inst_mask;
5021 for_each_inst(i, tmp_mask) {
5022 r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
5023 if (r)
5024 return r;
5025 }
5026 }
5027
5028 tmp_mask = inst_mask;
5029 for_each_inst(i, tmp_mask) {
5030 r = gfx_v9_4_3_xcc_cp_resume(adev, i);
5031 if (r)
5032 return r;
5033 }
5034
5035 return 0;
5036}
5037
5038static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
5039{
5040 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5041 int i;
5042
5043 for_each_inst(i, inst_mask)
5044 gfx_v9_4_3_xcc_fini(adev, i);
5045
5046 return 0;
5047}
5048
5049struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
5050 .suspend = &gfx_v9_4_3_xcp_suspend,
5051 .resume = &gfx_v9_4_3_xcp_resume
5052};
5053
5054struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
5055 .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
5056 .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
5057};
5058
5059static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
5060{
5061 int r;
5062
5063 r = amdgpu_ras_block_late_init(adev, ras_block);
5064 if (r)
5065 return r;
5066
5067 r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
5068 &gfx_v9_4_3_aca_info,
5069 NULL);
5070 if (r)
5071 goto late_fini;
5072
5073 return 0;
5074
5075late_fini:
5076 amdgpu_ras_block_late_fini(adev, ras_block);
5077
5078 return r;
5079}
5080
5081struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
5082 .ras_block = {
5083 .hw_ops = &gfx_v9_4_3_ras_ops,
5084 .ras_late_init = &gfx_v9_4_3_ras_late_init,
5085 },
5086 .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
5087};
23#include <linux/firmware.h>
24
25#include "amdgpu.h"
26#include "amdgpu_gfx.h"
27#include "soc15.h"
28#include "soc15d.h"
29#include "soc15_common.h"
30#include "vega10_enum.h"
31
32#include "v9_structs.h"
33
34#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
35
36#include "gc/gc_9_4_3_offset.h"
37#include "gc/gc_9_4_3_sh_mask.h"
38
39#include "gfx_v9_4_3.h"
40#include "amdgpu_xcp.h"
41#include "amdgpu_aca.h"
42
43MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
44MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
45
46#define GFX9_MEC_HPD_SIZE 4096
47#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
48
49#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
50#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
51
52#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
53#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
54#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
55
56struct amdgpu_gfx_ras gfx_v9_4_3_ras;
57
58static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
59static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
60static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
61static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
62static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
63 struct amdgpu_cu_info *cu_info);
64
65static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
66 uint64_t queue_mask)
67{
68 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
69 amdgpu_ring_write(kiq_ring,
70 PACKET3_SET_RESOURCES_VMID_MASK(0) |
71			  /* vmid_mask:0, queue_type:0 (KIQ) */
72 PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
73 amdgpu_ring_write(kiq_ring,
74 lower_32_bits(queue_mask)); /* queue mask lo */
75 amdgpu_ring_write(kiq_ring,
76 upper_32_bits(queue_mask)); /* queue mask hi */
77 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
78 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
79 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
80 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
81}
82
83static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
84 struct amdgpu_ring *ring)
85{
86 struct amdgpu_device *adev = kiq_ring->adev;
87 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
88 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
89 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
90
91 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
92	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
93	amdgpu_ring_write(kiq_ring,
94 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
95 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
96 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
97 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
98 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
99			  /* queue_type: normal compute queue */
100 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
101 /* alloc format: all_on_one_pipe */
102 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
103 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
104 /* num_queues: must be 1 */
105 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
106 amdgpu_ring_write(kiq_ring,
107 PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
108 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
109 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
110 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
111 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
112}
113
114static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
115 struct amdgpu_ring *ring,
116 enum amdgpu_unmap_queues_action action,
117 u64 gpu_addr, u64 seq)
118{
119 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
120
121 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
122 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
123 PACKET3_UNMAP_QUEUES_ACTION(action) |
124 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
125 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
126 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
127 amdgpu_ring_write(kiq_ring,
128 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
129
130 if (action == PREEMPT_QUEUES_NO_UNMAP) {
131 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
132 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
133 amdgpu_ring_write(kiq_ring, seq);
134 } else {
135 amdgpu_ring_write(kiq_ring, 0);
136 amdgpu_ring_write(kiq_ring, 0);
137 amdgpu_ring_write(kiq_ring, 0);
138 }
139}
140
141static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
142 struct amdgpu_ring *ring,
143 u64 addr,
144 u64 seq)
145{
146 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
147
148 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
149 amdgpu_ring_write(kiq_ring,
150 PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
151 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
152 PACKET3_QUERY_STATUS_COMMAND(2));
153 /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
154 amdgpu_ring_write(kiq_ring,
155 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
156 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
157 amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
158 amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
159 amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
160 amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
161}
162
163static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
164 uint16_t pasid, uint32_t flush_type,
165 bool all_hub)
166{
167 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
168 amdgpu_ring_write(kiq_ring,
169 PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
170 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
171 PACKET3_INVALIDATE_TLBS_PASID(pasid) |
172 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
173}
174
175static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
176 .kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
177 .kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
178 .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
179 .kiq_query_status = gfx_v9_4_3_kiq_query_status,
180 .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
181 .set_resources_size = 8,
182 .map_queues_size = 7,
183 .unmap_queues_size = 6,
184 .query_status_size = 7,
185 .invalidate_tlbs_size = 2,
186};
187
188static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
189{
190 int i, num_xcc;
191
192 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
193 for (i = 0; i < num_xcc; i++)
194 adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
195}
196
197static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
198{
199 int i, num_xcc, dev_inst;
200
201 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
202 for (i = 0; i < num_xcc; i++) {
203 dev_inst = GET_INST(GC, i);
204
205 WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
206 GOLDEN_GB_ADDR_CONFIG);
207 /* Golden settings applied by driver for ASIC with rev_id 0 */
208 if (adev->rev_id == 0) {
209 WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
210 REDUCE_FIFO_DEPTH_BY_2, 2);
211 } else {
212 WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
213 SPARE, 0x1);
214 }
215 }
216}
217
218static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
219 bool wc, uint32_t reg, uint32_t val)
220{
221 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
222 amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
223 WRITE_DATA_DST_SEL(0) |
224 (wc ? WR_CONFIRM : 0));
225 amdgpu_ring_write(ring, reg);
226 amdgpu_ring_write(ring, 0);
227 amdgpu_ring_write(ring, val);
228}
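
/*
 * A usage sketch with hypothetical arguments: emit a confirmed write of
 * 0x1 to SCRATCH_REG0 via the helper above. In the resulting packet,
 * dw1 carries the engine/destination flags, dw2/dw3 the register offset
 * (the upper half is 0 for register destinations) and dw4 the payload.
 */
#if 0
	gfx_v9_4_3_write_data_to_reg(ring, 0, true,
				     SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0),
				     0x1);
#endif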
229
230static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
231 int mem_space, int opt, uint32_t addr0,
232 uint32_t addr1, uint32_t ref, uint32_t mask,
233 uint32_t inv)
234{
235 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
236 amdgpu_ring_write(ring,
237 /* memory (1) or register (0) */
238 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
239 WAIT_REG_MEM_OPERATION(opt) | /* wait */
240 WAIT_REG_MEM_FUNCTION(3) | /* equal */
241 WAIT_REG_MEM_ENGINE(eng_sel)));
242
243 if (mem_space)
244 BUG_ON(addr0 & 0x3); /* Dword align */
245 amdgpu_ring_write(ring, addr0);
246 amdgpu_ring_write(ring, addr1);
247 amdgpu_ring_write(ring, ref);
248 amdgpu_ring_write(ring, mask);
249 amdgpu_ring_write(ring, inv); /* poll interval */
250}
251
252static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
253{
254 uint32_t scratch_reg0_offset, xcc_offset;
255 struct amdgpu_device *adev = ring->adev;
256 uint32_t tmp = 0;
257 unsigned i;
258 int r;
259
260 /* Use register offset which is local to XCC in the packet */
261 xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
262 scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
263 WREG32(scratch_reg0_offset, 0xCAFEDEAD);
264 tmp = RREG32(scratch_reg0_offset);
265
266 r = amdgpu_ring_alloc(ring, 3);
267 if (r)
268 return r;
269
270 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
271 amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
272 amdgpu_ring_write(ring, 0xDEADBEEF);
273 amdgpu_ring_commit(ring);
274
275 for (i = 0; i < adev->usec_timeout; i++) {
276 tmp = RREG32(scratch_reg0_offset);
277 if (tmp == 0xDEADBEEF)
278 break;
279 udelay(1);
280 }
281
282 if (i >= adev->usec_timeout)
283 r = -ETIMEDOUT;
284 return r;
285}
286
287static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
288{
289 struct amdgpu_device *adev = ring->adev;
290 struct amdgpu_ib ib;
291 struct dma_fence *f = NULL;
292
293 unsigned index;
294 uint64_t gpu_addr;
295 uint32_t tmp;
296 long r;
297
298 r = amdgpu_device_wb_get(adev, &index);
299 if (r)
300 return r;
301
302 gpu_addr = adev->wb.gpu_addr + (index * 4);
303 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
304 memset(&ib, 0, sizeof(ib));
305
306 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
307 if (r)
308 goto err1;
309
310 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
311 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
312 ib.ptr[2] = lower_32_bits(gpu_addr);
313 ib.ptr[3] = upper_32_bits(gpu_addr);
314 ib.ptr[4] = 0xDEADBEEF;
315 ib.length_dw = 5;
316
317 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
318 if (r)
319 goto err2;
320
321 r = dma_fence_wait_timeout(f, false, timeout);
322 if (r == 0) {
323 r = -ETIMEDOUT;
324 goto err2;
325 } else if (r < 0) {
326 goto err2;
327 }
328
329 tmp = adev->wb.wb[index];
330 if (tmp == 0xDEADBEEF)
331 r = 0;
332 else
333 r = -EINVAL;
334
335err2:
336 amdgpu_ib_free(adev, &ib, NULL);
337 dma_fence_put(f);
338err1:
339 amdgpu_device_wb_free(adev, index);
340 return r;
341}
342
343
344/* This value might differ per partition */
345static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
346{
347 uint64_t clock;
348
349 mutex_lock(&adev->gfx.gpu_clock_mutex);
350 WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
351 clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
352 ((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
353 mutex_unlock(&adev->gfx.gpu_clock_mutex);
354
355 return clock;
356}
357
358static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
359{
360 amdgpu_ucode_release(&adev->gfx.pfp_fw);
361 amdgpu_ucode_release(&adev->gfx.me_fw);
362 amdgpu_ucode_release(&adev->gfx.ce_fw);
363 amdgpu_ucode_release(&adev->gfx.rlc_fw);
364 amdgpu_ucode_release(&adev->gfx.mec_fw);
365 amdgpu_ucode_release(&adev->gfx.mec2_fw);
366
367 kfree(adev->gfx.rlc.register_list_format);
368}
369
370static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
371 const char *chip_name)
372{
373 char fw_name[30];
374 int err;
375 const struct rlc_firmware_header_v2_0 *rlc_hdr;
376 uint16_t version_major;
377 uint16_t version_minor;
378
379 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
380
381 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
382 if (err)
383 goto out;
384 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
385
386 version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
387 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
388 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
389out:
390 if (err)
391 amdgpu_ucode_release(&adev->gfx.rlc_fw);
392
393 return err;
394}
395
396static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
397{
398 return true;
399}
400
401static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
402{
403 if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
404 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
405}
406
407static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
408 const char *chip_name)
409{
410 char fw_name[30];
411 int err;
412
413 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
414
415 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
416 if (err)
417 goto out;
418 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
419 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
420
421 adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
422 adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
423
424 gfx_v9_4_3_check_if_need_gfxoff(adev);
425
426out:
427 if (err)
428 amdgpu_ucode_release(&adev->gfx.mec_fw);
429 return err;
430}
431
432static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
433{
434 char ucode_prefix[15];
435 int r;
436
437 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
438
439 r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
440 if (r)
441 return r;
442
443 r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
444 if (r)
445 return r;
446
447 return r;
448}
449
450static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
451{
452 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
453 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
454}
455
456static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
457{
458 int r, i, num_xcc;
459 u32 *hpd;
460 const __le32 *fw_data;
461 unsigned fw_size;
462 u32 *fw;
463 size_t mec_hpd_size;
464
465 const struct gfx_firmware_header_v1_0 *mec_hdr;
466
467 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
468 for (i = 0; i < num_xcc; i++)
469 bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
470 AMDGPU_MAX_COMPUTE_QUEUES);
471
472 /* take ownership of the relevant compute queues */
473 amdgpu_gfx_compute_queue_acquire(adev);
474 mec_hpd_size =
475 adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
476 if (mec_hpd_size) {
477 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
478 AMDGPU_GEM_DOMAIN_VRAM |
479 AMDGPU_GEM_DOMAIN_GTT,
480 &adev->gfx.mec.hpd_eop_obj,
481 &adev->gfx.mec.hpd_eop_gpu_addr,
482 (void **)&hpd);
483 if (r) {
484			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
485 gfx_v9_4_3_mec_fini(adev);
486 return r;
487 }
488
489 if (amdgpu_emu_mode == 1) {
490 for (i = 0; i < mec_hpd_size / 4; i++) {
491 memset((void *)(hpd + i), 0, 4);
492 if (i % 50 == 0)
493 msleep(1);
494 }
495 } else {
496 memset(hpd, 0, mec_hpd_size);
497 }
498
499 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
500 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
501 }
502
503 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
504
505 fw_data = (const __le32 *)
506 (adev->gfx.mec_fw->data +
507 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
508 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
509
510 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
511 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
512 &adev->gfx.mec.mec_fw_obj,
513 &adev->gfx.mec.mec_fw_gpu_addr,
514 (void **)&fw);
515 if (r) {
516 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
517 gfx_v9_4_3_mec_fini(adev);
518 return r;
519 }
520
521 memcpy(fw, fw_data, fw_size);
522
523 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
524 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
525
526 return 0;
527}
528
529static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
530 u32 sh_num, u32 instance, int xcc_id)
531{
532 u32 data;
533
534 if (instance == 0xffffffff)
535 data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
536 INSTANCE_BROADCAST_WRITES, 1);
537 else
538 data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
539 INSTANCE_INDEX, instance);
540
541 if (se_num == 0xffffffff)
542 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
543 SE_BROADCAST_WRITES, 1);
544 else
545 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
546
547 if (sh_num == 0xffffffff)
548 data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
549 SH_BROADCAST_WRITES, 1);
550 else
551 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
552
553 WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
554}
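
/*
 * A usage sketch with a hypothetical target: restrict register accesses
 * to SE1 while broadcasting across SHs and CU instances, then restore
 * full broadcast. Callers in this file hold grbm_idx_mutex around such
 * pairs.
 */
#if 0
	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v9_4_3_xcc_select_se_sh(adev, 1, 0xffffffff, 0xffffffff, 0);
	/* ... per-SE register reads/writes ... */
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff,
				    0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
#endif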
555
556static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
557{
558 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
559 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
560 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
561 (address << SQ_IND_INDEX__INDEX__SHIFT) |
562 (SQ_IND_INDEX__FORCE_READ_MASK));
563 return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
564}
565
566static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
567 uint32_t wave, uint32_t thread,
568 uint32_t regno, uint32_t num, uint32_t *out)
569{
570 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
571 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
572 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
573 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
574 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
575 (SQ_IND_INDEX__FORCE_READ_MASK) |
576 (SQ_IND_INDEX__AUTO_INCR_MASK));
577 while (num--)
578 *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
579}
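
/*
 * A minimal usage sketch, with hypothetical wave coordinates: dump the
 * first four SGPRs of wave 0 on SIMD 0 of XCC 0. AUTO_INCR makes every
 * SQ_IND_DATA read advance the index, so a single index write services
 * the whole burst.
 */
#if 0
	uint32_t sgprs[4];

	wave_read_regs(adev, 0, 0, 0, 0, SQIND_WAVE_SGPRS_OFFSET, 4, sgprs);
#endif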
580
581static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
582 uint32_t xcc_id, uint32_t simd, uint32_t wave,
583 uint32_t *dst, int *no_fields)
584{
585 /* type 1 wave data */
586 dst[(*no_fields)++] = 1;
587 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
588 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
589 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
590 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
591 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
592 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
593 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
594 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
595 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
596 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
597 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
598 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
599 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
600 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
601 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
602}
603
604static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
605 uint32_t wave, uint32_t start,
606 uint32_t size, uint32_t *dst)
607{
608 wave_read_regs(adev, xcc_id, simd, wave, 0,
609 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
610}
611
612static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
613 uint32_t wave, uint32_t thread,
614 uint32_t start, uint32_t size,
615 uint32_t *dst)
616{
617 wave_read_regs(adev, xcc_id, simd, wave, thread,
618 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
619}
620
621static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
622 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
623{
624 soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
625}
626
627
628static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
629 int num_xccs_per_xcp)
630{
631 int ret, i, num_xcc;
632 u32 tmp = 0;
633
634 if (adev->psp.funcs) {
635 ret = psp_spatial_partition(&adev->psp,
636 NUM_XCC(adev->gfx.xcc_mask) /
637 num_xccs_per_xcp);
638 if (ret)
639 return ret;
640 } else {
641 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
642
643 for (i = 0; i < num_xcc; i++) {
644 tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
645 num_xccs_per_xcp);
646 tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
647 i % num_xccs_per_xcp);
648 WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
649 tmp);
650 }
651 ret = 0;
652 }
653
654 adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
655
656 return ret;
657}
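
/*
 * A worked example of the partitioning above, assuming a hypothetical
 * 8-XCC part switched to num_xccs_per_xcp = 2: the PSP path requests
 * 8 / 2 = 4 partitions, while the direct-register fallback programs
 * each XCC i with NUM_XCC_IN_XCP = 2 and VIRTUAL_XCC_ID = i % 2, so
 * XCCs 0/1 form one XCP, XCCs 2/3 the next, and so on.
 */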
658
659static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
660{
661 int xcc;
662
663 xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
664 if (!xcc) {
665 dev_err(adev->dev, "Couldn't find xcc mapping from IH node");
666 return -EINVAL;
667 }
668
669 return xcc - 1;
670}
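
/*
 * A worked example, assuming a hypothetical xcc_mask of 0b1111 and
 * ih_node = 5: GENMASK(5 / 2, 0) = 0b111, the masked population count
 * is 3, and the function returns logical XCC 2. A zero count means the
 * IH node maps to no populated XCC, hence the -EINVAL path.
 */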
671
672static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
673 .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
674 .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
675 .read_wave_data = &gfx_v9_4_3_read_wave_data,
676 .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
677 .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
678 .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
679 .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
680 .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
681};
682
683static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
684 struct aca_bank *bank, enum aca_error_type type,
685 struct aca_bank_report *report, void *data)
686{
687 u64 status, misc0;
688 u32 instlo;
689 int ret;
690
691 status = bank->regs[ACA_REG_IDX_STATUS];
692 if ((type == ACA_ERROR_TYPE_UE &&
693 ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
694 (type == ACA_ERROR_TYPE_CE &&
695 ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
696
697 ret = aca_bank_info_decode(bank, &report->info);
698 if (ret)
699 return ret;
700
701 /* NOTE: overwrite info.die_id with xcd id for gfx */
702 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
703 instlo &= GENMASK(31, 1);
704 report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
705
706 misc0 = bank->regs[ACA_REG_IDX_MISC0];
707 report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
708 }
709
710 return 0;
711}
712
713static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
714 enum aca_error_type type, void *data)
715{
716 u32 instlo;
717
718 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
719 instlo &= GENMASK(31, 1);
720 switch (instlo) {
721 case mmSMNAID_XCD0_MCA_SMU:
722 case mmSMNAID_XCD1_MCA_SMU:
723 case mmSMNXCD_XCD0_MCA_SMU:
724 return true;
725 default:
726 break;
727 }
728
729 return false;
730}
731
732static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
733 .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
734 .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
735};
736
737static const struct aca_info gfx_v9_4_3_aca_info = {
738 .hwip = ACA_HWIP_TYPE_SMU,
739 .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
740 .bank_ops = &gfx_v9_4_3_aca_bank_ops,
741};
742
743static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
744{
745 u32 gb_addr_config;
746
747 adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
748 adev->gfx.ras = &gfx_v9_4_3_ras;
749
750 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
751 case IP_VERSION(9, 4, 3):
752 adev->gfx.config.max_hw_contexts = 8;
753 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
754 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
755 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
756 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
757 gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
758 break;
759 default:
760 BUG();
761 break;
762 }
763
764 adev->gfx.config.gb_addr_config = gb_addr_config;
765
766 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
767 REG_GET_FIELD(
768 adev->gfx.config.gb_addr_config,
769 GB_ADDR_CONFIG,
770 NUM_PIPES);
771
772 adev->gfx.config.max_tile_pipes =
773 adev->gfx.config.gb_addr_config_fields.num_pipes;
774
775 adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
776 REG_GET_FIELD(
777 adev->gfx.config.gb_addr_config,
778 GB_ADDR_CONFIG,
779 NUM_BANKS);
780 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
781 REG_GET_FIELD(
782 adev->gfx.config.gb_addr_config,
783 GB_ADDR_CONFIG,
784 MAX_COMPRESSED_FRAGS);
785 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
786 REG_GET_FIELD(
787 adev->gfx.config.gb_addr_config,
788 GB_ADDR_CONFIG,
789 NUM_RB_PER_SE);
790 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
791 REG_GET_FIELD(
792 adev->gfx.config.gb_addr_config,
793 GB_ADDR_CONFIG,
794 NUM_SHADER_ENGINES);
795 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
796 REG_GET_FIELD(
797 adev->gfx.config.gb_addr_config,
798 GB_ADDR_CONFIG,
799 PIPE_INTERLEAVE_SIZE));
800
801 return 0;
802}
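
/*
 * A decode example with a hypothetical register value: a GB_ADDR_CONFIG
 * whose NUM_PIPES field reads 2 and whose PIPE_INTERLEAVE_SIZE field
 * reads 1 yields 1 << 2 = 4 pipes and a 1 << (8 + 1) = 512-byte pipe
 * interleave; each field encodes a power of two, with the interleave
 * offset by the 256-byte (1 << 8) minimum.
 */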
803
804static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
805 int xcc_id, int mec, int pipe, int queue)
806{
807 unsigned irq_type;
808	struct amdgpu_ring *ring;
809 unsigned int hw_prio;
810 uint32_t xcc_doorbell_start;
811
812 ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
813 ring_id];
814
815 /* mec0 is me1 */
816 ring->xcc_id = xcc_id;
817 ring->me = mec + 1;
818 ring->pipe = pipe;
819 ring->queue = queue;
820
821 ring->ring_obj = NULL;
822 ring->use_doorbell = true;
823 xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
824 xcc_id * adev->doorbell_index.xcc_doorbell_range;
825 ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
826 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
827 (ring_id + xcc_id * adev->gfx.num_compute_rings) *
828 GFX9_MEC_HPD_SIZE;
829 ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
830 sprintf(ring->name, "comp_%d.%d.%d.%d",
831 ring->xcc_id, ring->me, ring->pipe, ring->queue);
832
833 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
834 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
835 + ring->pipe;
836 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
837 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
838 /* type-2 packets are deprecated on MEC, use type-3 instead */
839 return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
840 hw_prio, NULL);
841}
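
/*
 * A worked example of the doorbell math above, with hypothetical
 * numbers: given mec_ring0 = 0x10 and xcc_doorbell_range = 0x20, ring 2
 * on XCC 1 gets xcc_doorbell_start = 0x10 + 1 * 0x20 = 0x30 and
 * doorbell index (0x30 + 2) << 1 = 0x64; the final shift accounts for
 * 64-bit doorbell slots.
 */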
842
843static int gfx_v9_4_3_sw_init(void *handle)
844{
845 int i, j, k, r, ring_id, xcc_id, num_xcc;
846 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
847
848 adev->gfx.mec.num_mec = 2;
849 adev->gfx.mec.num_pipe_per_mec = 4;
850 adev->gfx.mec.num_queue_per_pipe = 8;
851
852 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
853
854 /* EOP Event */
855 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
856 if (r)
857 return r;
858
859 /* Privileged reg */
860 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
861 &adev->gfx.priv_reg_irq);
862 if (r)
863 return r;
864
865 /* Privileged inst */
866 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
867 &adev->gfx.priv_inst_irq);
868 if (r)
869 return r;
870
871 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
872
873 r = adev->gfx.rlc.funcs->init(adev);
874 if (r) {
875 DRM_ERROR("Failed to init rlc BOs!\n");
876 return r;
877 }
878
879 r = gfx_v9_4_3_mec_init(adev);
880 if (r) {
881 DRM_ERROR("Failed to init MEC BOs!\n");
882 return r;
883 }
884
885 /* set up the compute queues - allocate horizontally across pipes */
886 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
887 ring_id = 0;
888 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
889 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
890 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
891 k++) {
892 if (!amdgpu_gfx_is_mec_queue_enabled(
893 adev, xcc_id, i, k, j))
894 continue;
895
896 r = gfx_v9_4_3_compute_ring_init(adev,
897 ring_id,
898 xcc_id,
899 i, k, j);
900 if (r)
901 return r;
902
903 ring_id++;
904 }
905 }
906 }
907
908 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
909 if (r) {
910 DRM_ERROR("Failed to init KIQ BOs!\n");
911 return r;
912 }
913
914 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
915 if (r)
916 return r;
917
918		/* create MQD for all compute queues as well as KIQ for SRIOV case */
919 r = amdgpu_gfx_mqd_sw_init(adev,
920 sizeof(struct v9_mqd_allocation), xcc_id);
921 if (r)
922 return r;
923 }
924
925 r = gfx_v9_4_3_gpu_early_init(adev);
926 if (r)
927 return r;
928
929 r = amdgpu_gfx_ras_sw_init(adev);
930 if (r)
931 return r;
932
933
934 if (!amdgpu_sriov_vf(adev))
935 r = amdgpu_gfx_sysfs_init(adev);
936
937 return r;
938}
939
940static int gfx_v9_4_3_sw_fini(void *handle)
941{
942 int i, num_xcc;
943 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
944
945 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
946 for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
947 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
948
949 for (i = 0; i < num_xcc; i++) {
950 amdgpu_gfx_mqd_sw_fini(adev, i);
951 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
952 amdgpu_gfx_kiq_fini(adev, i);
953 }
954
955 gfx_v9_4_3_mec_fini(adev);
956 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
957 gfx_v9_4_3_free_microcode(adev);
958 if (!amdgpu_sriov_vf(adev))
959 amdgpu_gfx_sysfs_fini(adev);
960
961 return 0;
962}
963
964#define DEFAULT_SH_MEM_BASES (0x6000)
965static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
966 int xcc_id)
967{
968 int i;
969 uint32_t sh_mem_config;
970 uint32_t sh_mem_bases;
971 uint32_t data;
972
973 /*
974 * Configure apertures:
975 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
976 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
977 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
978 */
979 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
980
981 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
982 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
983 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
984
985 mutex_lock(&adev->srbm_mutex);
986 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
987 soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
988 /* CP and shaders */
989 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
990 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
991
992 /* Enable trap for each kfd vmid. */
993 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
994 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
995 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
996 }
997 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
998 mutex_unlock(&adev->srbm_mutex);
999
1000	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1001	   access. These should be enabled by FW for target VMIDs. */
1002 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1003 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
1004 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
1005 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
1006 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
1007 }
1008}
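
/*
 * How DEFAULT_SH_MEM_BASES produces the apertures listed above:
 * SH_MEM_BASES packs the top 16 address bits of the private and shared
 * bases, so 0x6000 | (0x6000 << 16) places both at
 * 0x6000 << 48 = 0x6000'0000'0000'0000, which is where the
 * LDS/scratch/GPUVM windows in the comment begin.
 */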
1009
1010static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
1011{
1012 int vmid;
1013
1014 /*
1015 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1016 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1017 * the driver can enable them for graphics. VMID0 should maintain
1018 * access so that HWS firmware can save/restore entries.
1019 */
1020 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
1021 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
1022 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
1023 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
1024 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
1025 }
1026}
1027
1028static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
1029 int xcc_id)
1030{
1031 u32 tmp;
1032 int i;
1033
1034 /* XXX SH_MEM regs */
1035 /* where to put LDS, scratch, GPUVM in FSA64 space */
1036 mutex_lock(&adev->srbm_mutex);
1037 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1038 soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1039 /* CP and shaders */
1040 if (i == 0) {
1041 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1042 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1043 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1044 !!adev->gmc.noretry);
1045 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1046 regSH_MEM_CONFIG, tmp);
1047 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1048 regSH_MEM_BASES, 0);
1049 } else {
1050 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1051 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1052 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1053 !!adev->gmc.noretry);
1054 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1055 regSH_MEM_CONFIG, tmp);
1056 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1057 (adev->gmc.private_aperture_start >>
1058 48));
1059 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1060 (adev->gmc.shared_aperture_start >>
1061 48));
1062 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1063 regSH_MEM_BASES, tmp);
1064 }
1065 }
1066 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1067
1068 mutex_unlock(&adev->srbm_mutex);
1069
1070 gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
1071 gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
1072}
1073
1074static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
1075{
1076 int i, num_xcc;
1077
1078 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1079
1080 gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
1081 adev->gfx.config.db_debug2 =
1082 RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
1083
1084 for (i = 0; i < num_xcc; i++)
1085 gfx_v9_4_3_xcc_constants_init(adev, i);
1086}
1087
1088static void
1089gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
1090 int xcc_id)
1091{
1092 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
1093}
1094
1095static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
1096{
1097 /*
1098	 * The RLC save/restore list is usable since v2_1
1099	 * and is needed by the gfxoff feature.
1100 */
1101 if (adev->gfx.rlc.is_rlc_v2_1)
1102 gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
1103}
1104
1105static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
1106{
1107 uint32_t data;
1108
1109 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
1110 data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
1111 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
1112}
1113
1114static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
1115{
1116 uint32_t rlc_setting;
1117
1118 /* if RLC is not enabled, do nothing */
1119 rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
1120 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
1121 return false;
1122
1123 return true;
1124}
1125
1126static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
1127{
1128 uint32_t data;
1129 unsigned i;
1130
1131 data = RLC_SAFE_MODE__CMD_MASK;
1132 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
1133 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1134
1135 /* wait for RLC_SAFE_MODE */
1136 for (i = 0; i < adev->usec_timeout; i++) {
1137 if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1138 break;
1139 udelay(1);
1140 }
1141}
1142
1143static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1144 int xcc_id)
1145{
1146 uint32_t data;
1147
1148 data = RLC_SAFE_MODE__CMD_MASK;
1149 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1150}
1151
1152static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1153{
1154 int xcc_id, num_xcc;
1155 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1156
1157 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1158 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1159 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
1160 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
1161 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
1162 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
1163 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
1164 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
1165 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
1166 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
1167 }
1168 adev->gfx.rlc.rlcg_reg_access_supported = true;
1169}
1170
1171static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1172{
1173 /* init spm vmid with 0xf */
1174 if (adev->gfx.rlc.funcs->update_spm_vmid)
1175 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
1176
1177 return 0;
1178}
1179
1180static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1181 int xcc_id)
1182{
1183 u32 i, j, k;
1184 u32 mask;
1185
1186 mutex_lock(&adev->grbm_idx_mutex);
1187 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1188 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1189 gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1190 xcc_id);
1191 for (k = 0; k < adev->usec_timeout; k++) {
1192 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1193 break;
1194 udelay(1);
1195 }
1196 if (k == adev->usec_timeout) {
1197 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1198 0xffffffff,
1199 0xffffffff, xcc_id);
1200 mutex_unlock(&adev->grbm_idx_mutex);
1201 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1202 i, j);
1203 return;
1204 }
1205 }
1206 }
1207 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1208 xcc_id);
1209 mutex_unlock(&adev->grbm_idx_mutex);
1210
1211 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1212 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1213 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1214 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1215 for (k = 0; k < adev->usec_timeout; k++) {
1216 if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1217 break;
1218 udelay(1);
1219 }
1220}
1221
1222static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1223 bool enable, int xcc_id)
1224{
1225 u32 tmp;
1226
1227 /* These interrupts should be enabled to drive DS clock */
1228
1229 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1230
1231 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1232 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1233 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1234
1235 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1236}
1237
1238static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1239{
1240 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1241 RLC_ENABLE_F32, 0);
1242 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1243 gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1244}
1245
1246static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1247{
1248 int i, num_xcc;
1249
1250 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1251 for (i = 0; i < num_xcc; i++)
1252 gfx_v9_4_3_xcc_rlc_stop(adev, i);
1253}
1254
1255static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1256{
1257 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1258 SOFT_RESET_RLC, 1);
1259 udelay(50);
1260 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1261 SOFT_RESET_RLC, 0);
1262 udelay(50);
1263}
1264
1265static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1266{
1267 int i, num_xcc;
1268
1269 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1270 for (i = 0; i < num_xcc; i++)
1271 gfx_v9_4_3_xcc_rlc_reset(adev, i);
1272}
1273
1274static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1275{
1276 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1277 RLC_ENABLE_F32, 1);
1278 udelay(50);
1279
1280	/* on APUs (carrizo legacy) the CP interrupt is enabled only after the CP is initialized */
1281 if (!(adev->flags & AMD_IS_APU)) {
1282 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1283 udelay(50);
1284 }
1285}
1286
1287static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1288{
1289#ifdef AMDGPU_RLC_DEBUG_RETRY
1290 u32 rlc_ucode_ver;
1291#endif
1292 int i, num_xcc;
1293
1294 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1295 for (i = 0; i < num_xcc; i++) {
1296 gfx_v9_4_3_xcc_rlc_start(adev, i);
1297#ifdef AMDGPU_RLC_DEBUG_RETRY
1298 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
1299 rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1300 if (rlc_ucode_ver == 0x108) {
1301 dev_info(adev->dev,
1302				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1303 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1304 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1305 * default is 0x9C4 to create a 100us interval */
1306 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1307 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1308 * to disable the page fault retry interrupts, default is
1309 * 0x100 (256) */
1310 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1311 }
1312#endif
1313 }
1314}
1315
1316static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1317 int xcc_id)
1318{
1319 const struct rlc_firmware_header_v2_0 *hdr;
1320 const __le32 *fw_data;
1321 unsigned i, fw_size;
1322
1323 if (!adev->gfx.rlc_fw)
1324 return -EINVAL;
1325
1326 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1327 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1328
1329 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1330 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1331 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1332
1333 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1334 RLCG_UCODE_LOADING_START_ADDRESS);
1335 for (i = 0; i < fw_size; i++) {
1336 if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1337 dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1338 msleep(1);
1339 }
1340 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1341 }
1342 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1343
1344 return 0;
1345}
1346
1347static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1348{
1349 int r;
1350
1351 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1352 gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1353 /* legacy rlc firmware loading */
1354 r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1355 if (r)
1356 return r;
1357 gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1358 }
1359
1360 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1361 /* disable CG */
1362 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1363 gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1364 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1365
1366 return 0;
1367}
1368
1369static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1370{
1371 int r, i, num_xcc;
1372
1373 if (amdgpu_sriov_vf(adev))
1374 return 0;
1375
1376 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1377 for (i = 0; i < num_xcc; i++) {
1378 r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1379 if (r)
1380 return r;
1381 }
1382
1383 return 0;
1384}
1385
1386static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1387 unsigned vmid)
1388{
1389 u32 reg, data;
1390
1391 reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1392 if (amdgpu_sriov_is_pp_one_vf(adev))
1393 data = RREG32_NO_KIQ(reg);
1394 else
1395 data = RREG32(reg);
1396
1397 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
1398 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1399
1400 if (amdgpu_sriov_is_pp_one_vf(adev))
1401 WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1402 else
1403 WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1404}
1405
1406static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1407 {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1408 {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1409};
1410
1411static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1412 uint32_t offset,
1413 struct soc15_reg_rlcg *entries, int arr_size)
1414{
1415 int i, inst;
1416 uint32_t reg;
1417
1418 if (!entries)
1419 return false;
1420
1421 for (i = 0; i < arr_size; i++) {
1422 const struct soc15_reg_rlcg *entry;
1423
1424 entry = &entries[i];
1425 inst = adev->ip_map.logical_to_dev_inst ?
1426 adev->ip_map.logical_to_dev_inst(
1427 adev, entry->hwip, entry->instance) :
1428 entry->instance;
1429 reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1430 entry->reg;
1431 if (offset == reg)
1432 return true;
1433 }
1434
1435 return false;
1436}
1437
1438static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1439{
1440 return gfx_v9_4_3_check_rlcg_range(adev, offset,
1441 (void *)rlcg_access_gc_9_4_3,
1442 ARRAY_SIZE(rlcg_access_gc_9_4_3));
1443}
1444
1445static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1446 bool enable, int xcc_id)
1447{
1448 if (enable) {
1449 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1450 } else {
1451 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1452 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1453 adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1454 }
1455 udelay(50);
1456}
1457
1458static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1459 int xcc_id)
1460{
1461 const struct gfx_firmware_header_v1_0 *mec_hdr;
1462 const __le32 *fw_data;
1463 unsigned i;
1464 u32 tmp;
1465 u32 mec_ucode_addr_offset;
1466 u32 mec_ucode_data_offset;
1467
1468 if (!adev->gfx.mec_fw)
1469 return -EINVAL;
1470
1471 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1472
1473 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1474 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1475
1476 fw_data = (const __le32 *)
1477 (adev->gfx.mec_fw->data +
1478 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1479 tmp = 0;
1480 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1481 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1482 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1483
1484 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1485 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1486 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1487 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1488
1489 mec_ucode_addr_offset =
1490 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1491 mec_ucode_data_offset =
1492 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1493
1494 /* MEC1 */
1495 WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1496 for (i = 0; i < mec_hdr->jt_size; i++)
1497 WREG32(mec_ucode_data_offset,
1498 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
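	/* Only the jump table (jt) portion of the image is written through
	 * the UCODE_ADDR/UCODE_DATA pair; the bulk of the MEC ucode is
	 * fetched by the CP from mec_fw_gpu_addr, programmed above. The
	 * firmware version written to UCODE_ADDR below appears to simply
	 * record the loaded ucode version rather than program a real address.
	 */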
1499
1500 WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1501	/* TODO: loading MEC2 firmware is only necessary if MEC2 needs to run different microcode than MEC1. */
1502
1503 return 0;
1504}
1505
1506/* KIQ functions */
1507static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1508{
1509 uint32_t tmp;
1510 struct amdgpu_device *adev = ring->adev;
1511
1512	/* tell the RLC which queue is the KIQ */
1513 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1514 tmp &= 0xffffff00;
1515 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1516 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
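	/* Queue field layout: bits [2:0] = queue, [4:3] = pipe, [6:5] = me.
	 * Bit 7 is then set in a second write, which presumably latches the
	 * new KIQ selection in the RLC scheduler (the exact semantics of
	 * bit 7 are not documented here).
	 */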
1517 tmp |= 0x80;
1518 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1519}
1520
1521static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1522{
1523 struct amdgpu_device *adev = ring->adev;
1524
1525 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1526 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1527 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1528 mqd->cp_hqd_queue_priority =
1529 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1530 }
1531 }
1532}
1533
1534static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1535{
1536 struct amdgpu_device *adev = ring->adev;
1537 struct v9_mqd *mqd = ring->mqd_ptr;
1538 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1539 uint32_t tmp;
1540
1541 mqd->header = 0xC0310800;
1542 mqd->compute_pipelinestat_enable = 0x00000001;
1543 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1544 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1545 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1546 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1547 mqd->compute_misc_reserved = 0x00000003;
1548
1549 mqd->dynamic_cu_mask_addr_lo =
1550 lower_32_bits(ring->mqd_gpu_addr
1551 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1552 mqd->dynamic_cu_mask_addr_hi =
1553 upper_32_bits(ring->mqd_gpu_addr
1554 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1555
1556 eop_base_addr = ring->eop_gpu_addr >> 8;
1557 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1558 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1559
1560 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1561 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1562 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1563 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
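	/* Worked example: GFX9_MEC_HPD_SIZE is 4096 bytes = 1024 dwords,
	 * order_base_2(1024) = 10, so EOP_SIZE = 9 and the hardware sees
	 * 2^(9+1) = 1024 dwords, matching the HPD allocation.
	 */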
1564
1565 mqd->cp_hqd_eop_control = tmp;
1566
1567 /* enable doorbell? */
1568 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1569
1570 if (ring->use_doorbell) {
1571 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1572 DOORBELL_OFFSET, ring->doorbell_index);
1573 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1574 DOORBELL_EN, 1);
1575 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1576 DOORBELL_SOURCE, 0);
1577 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1578 DOORBELL_HIT, 0);
1579 } else {
1580 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1581 DOORBELL_EN, 0);
1582 }
1583
1584 mqd->cp_hqd_pq_doorbell_control = tmp;
1585
1586 /* disable the queue if it's active */
1587 ring->wptr = 0;
1588 mqd->cp_hqd_dequeue_request = 0;
1589 mqd->cp_hqd_pq_rptr = 0;
1590 mqd->cp_hqd_pq_wptr_lo = 0;
1591 mqd->cp_hqd_pq_wptr_hi = 0;
1592
1593 /* set the pointer to the MQD */
1594 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1595 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1596
1597 /* set MQD vmid to 0 */
1598 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1599 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1600 mqd->cp_mqd_control = tmp;
1601
1602	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
1603 hqd_gpu_addr = ring->gpu_addr >> 8;
1604 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1605 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1606
1607 /* set up the HQD, this is similar to CP_RB0_CNTL */
1608 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1609 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1610 (order_base_2(ring->ring_size / 4) - 1));
1611 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1612 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
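	/* Note: REG_SET_FIELD() already shifts the value into the field, so
	 * the explicit << 8 above gets masked back out and the field appears
	 * to end up programmed as 0; the pattern seems to be inherited from
	 * older gfx code (with AMDGPU_GPU_PAGE_SIZE = 4096, the unshifted
	 * value would be order_base_2(1024) - 1 = 9).
	 */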
1613#ifdef __BIG_ENDIAN
1614 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1615#endif
1616 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1617 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1618 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1619 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1620 mqd->cp_hqd_pq_control = tmp;
1621
1622 /* set the wb address whether it's enabled or not */
1623 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1624 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1625 mqd->cp_hqd_pq_rptr_report_addr_hi =
1626 upper_32_bits(wb_gpu_addr) & 0xffff;
1627
1628 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1629 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1630 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1631 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1632
1633 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1634 ring->wptr = 0;
1635 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1636
1637 /* set the vmid for the queue */
1638 mqd->cp_hqd_vmid = 0;
1639
1640 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1641 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1642 mqd->cp_hqd_persistent_state = tmp;
1643
1644 /* set MIN_IB_AVAIL_SIZE */
1645 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1646 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1647 mqd->cp_hqd_ib_control = tmp;
1648
1649 /* set static priority for a queue/ring */
1650 gfx_v9_4_3_mqd_set_priority(ring, mqd);
1651 mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1652
1653	/* the map_queues packet doesn't need to activate the queue,
1654	 * so only the KIQ needs to set this field.
1655	 */
1656 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1657 mqd->cp_hqd_active = 1;
1658
1659 return 0;
1660}
1661
1662static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1663 int xcc_id)
1664{
1665 struct amdgpu_device *adev = ring->adev;
1666 struct v9_mqd *mqd = ring->mqd_ptr;
1667 int j;
1668
1669 /* disable wptr polling */
1670 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1671
1672 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1673 mqd->cp_hqd_eop_base_addr_lo);
1674 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1675 mqd->cp_hqd_eop_base_addr_hi);
1676
1677 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1678 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1679 mqd->cp_hqd_eop_control);
1680
1681 /* enable doorbell? */
1682 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1683 mqd->cp_hqd_pq_doorbell_control);
1684
1685 /* disable the queue if it's active */
1686 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1687 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1688 for (j = 0; j < adev->usec_timeout; j++) {
1689 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1690 break;
1691 udelay(1);
1692 }
1693 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1694 mqd->cp_hqd_dequeue_request);
1695 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1696 mqd->cp_hqd_pq_rptr);
1697 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1698 mqd->cp_hqd_pq_wptr_lo);
1699 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1700 mqd->cp_hqd_pq_wptr_hi);
1701 }
1702
1703 /* set the pointer to the MQD */
1704 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1705 mqd->cp_mqd_base_addr_lo);
1706 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1707 mqd->cp_mqd_base_addr_hi);
1708
1709 /* set MQD vmid to 0 */
1710 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1711 mqd->cp_mqd_control);
1712
1713	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
1714 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1715 mqd->cp_hqd_pq_base_lo);
1716 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1717 mqd->cp_hqd_pq_base_hi);
1718
1719 /* set up the HQD, this is similar to CP_RB0_CNTL */
1720 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1721 mqd->cp_hqd_pq_control);
1722
1723 /* set the wb address whether it's enabled or not */
1724 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1725 mqd->cp_hqd_pq_rptr_report_addr_lo);
1726 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1727 mqd->cp_hqd_pq_rptr_report_addr_hi);
1728
1729 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1730 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1731 mqd->cp_hqd_pq_wptr_poll_addr_lo);
1732 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1733 mqd->cp_hqd_pq_wptr_poll_addr_hi);
1734
1735 /* enable the doorbell if requested */
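	/* Doorbell indices are kept in 64-bit (qword) units; multiplying by
	 * 2 converts them to 32-bit doorbell slots and the << 2 converts
	 * that to a byte offset, which is what the RANGE registers appear
	 * to expect.
	 */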
1736 if (ring->use_doorbell) {
1737 WREG32_SOC15(
1738 GC, GET_INST(GC, xcc_id),
1739 regCP_MEC_DOORBELL_RANGE_LOWER,
1740 ((adev->doorbell_index.kiq +
1741 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1742 2) << 2);
1743 WREG32_SOC15(
1744 GC, GET_INST(GC, xcc_id),
1745 regCP_MEC_DOORBELL_RANGE_UPPER,
1746 ((adev->doorbell_index.userqueue_end +
1747 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1748 2) << 2);
1749 }
1750
1751 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1752 mqd->cp_hqd_pq_doorbell_control);
1753
1754 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1755 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1756 mqd->cp_hqd_pq_wptr_lo);
1757 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1758 mqd->cp_hqd_pq_wptr_hi);
1759
1760 /* set the vmid for the queue */
1761 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
1762
1763 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
1764 mqd->cp_hqd_persistent_state);
1765
1766 /* activate the queue */
1767 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
1768 mqd->cp_hqd_active);
1769
1770 if (ring->use_doorbell)
1771 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
1772
1773 return 0;
1774}
1775
1776static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
1777 int xcc_id)
1778{
1779 struct amdgpu_device *adev = ring->adev;
1780 int j;
1781
1782 /* disable the queue if it's active */
1783 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1784
1785 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1786
1787 for (j = 0; j < adev->usec_timeout; j++) {
1788 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1789 break;
1790 udelay(1);
1791 }
1792
1793		if (j == adev->usec_timeout) {
1794 DRM_DEBUG("%s dequeue request failed.\n", ring->name);
1795
1796 /* Manual disable if dequeue request times out */
1797 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
1798 }
1799
1800 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1801 0);
1802 }
1803
1804 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
1805 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
1806 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
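	/* 0x40000000 corresponds to the DOORBELL_EN bit of
	 * CP_HQD_PQ_DOORBELL_CONTROL; writing it and then clearing the
	 * register presumably pulses the doorbell logic into a clean,
	 * disabled state before the queue registers are zeroed.
	 */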
1807 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
1808 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
1809 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
1810 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
1811 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
1812
1813 return 0;
1814}
1815
1816static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1817{
1818 struct amdgpu_device *adev = ring->adev;
1819 struct v9_mqd *mqd = ring->mqd_ptr;
1820 struct v9_mqd *tmp_mqd;
1821
1822 gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
1823
1824	/* The GPU could be in a bad state during probe: the driver triggers
1825	 * the reset after loading the SMU, and in that case the MQD has not
1826	 * been initialized yet, so the driver needs to re-init it here.
1827	 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
1828	 */
1829 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
1830 if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
1831		/* for the GPU_RESET case, reset the MQD to a clean state */
1832 if (adev->gfx.kiq[xcc_id].mqd_backup)
1833 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
1834
1835 /* reset ring buffer */
1836 ring->wptr = 0;
1837 amdgpu_ring_clear_ring(ring);
1838 mutex_lock(&adev->srbm_mutex);
1839 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1840 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1841 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1842 mutex_unlock(&adev->srbm_mutex);
1843 } else {
1844 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1845 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1846 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1847 mutex_lock(&adev->srbm_mutex);
1848 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
1849 amdgpu_ring_clear_ring(ring);
1850 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1851 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1852 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1853 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1854 mutex_unlock(&adev->srbm_mutex);
1855
1856 if (adev->gfx.kiq[xcc_id].mqd_backup)
1857 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
1858 }
1859
1860 return 0;
1861}
1862
1863static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1864{
1865 struct amdgpu_device *adev = ring->adev;
1866 struct v9_mqd *mqd = ring->mqd_ptr;
1867 int mqd_idx = ring - &adev->gfx.compute_ring[0];
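	/* Pointer arithmetic: mqd_idx is the ring's index within the global
	 * compute_ring array, which also indexes the per-ring MQD backup.
	 */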
1868 struct v9_mqd *tmp_mqd;
1869
1870	/* Same as the KIQ init above: the driver needs to re-init the MQD
1871	 * if mqd->cp_hqd_pq_control shows it has not been initialized before.
1872	 */
1873 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
1874
1875 if (!tmp_mqd->cp_hqd_pq_control ||
1876 (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
1877 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1878 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1879 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1880 mutex_lock(&adev->srbm_mutex);
1881 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1882 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1883 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1884 mutex_unlock(&adev->srbm_mutex);
1885
1886 if (adev->gfx.mec.mqd_backup[mqd_idx])
1887 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
1888 } else {
1889 /* restore MQD to a clean status */
1890 if (adev->gfx.mec.mqd_backup[mqd_idx])
1891 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
1892 /* reset ring buffer */
1893 ring->wptr = 0;
1894 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
1895 amdgpu_ring_clear_ring(ring);
1896 }
1897
1898 return 0;
1899}
1900
1901static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
1902{
1903 struct amdgpu_ring *ring;
1904 int j;
1905
1906 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1907 ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
1908 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
1909 mutex_lock(&adev->srbm_mutex);
1910 soc15_grbm_select(adev, ring->me,
1911 ring->pipe,
1912 ring->queue, 0, GET_INST(GC, xcc_id));
1913 gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
1914 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1915 mutex_unlock(&adev->srbm_mutex);
1916 }
1917 }
1918
1919 return 0;
1920}
1921
1922static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
1923{
1924 struct amdgpu_ring *ring;
1925 int r;
1926
1927 ring = &adev->gfx.kiq[xcc_id].ring;
1928
1929 r = amdgpu_bo_reserve(ring->mqd_obj, false);
1930 if (unlikely(r != 0))
1931 return r;
1932
1933 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1934 if (unlikely(r != 0)) {
1935 amdgpu_bo_unreserve(ring->mqd_obj);
1936 return r;
1937 }
1938
1939 gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
1940 amdgpu_bo_kunmap(ring->mqd_obj);
1941 ring->mqd_ptr = NULL;
1942 amdgpu_bo_unreserve(ring->mqd_obj);
1943 return 0;
1944}
1945
1946static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
1947{
1948 struct amdgpu_ring *ring = NULL;
1949 int r = 0, i;
1950
1951 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
1952
1953 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1954 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1955
1956 r = amdgpu_bo_reserve(ring->mqd_obj, false);
1957 if (unlikely(r != 0))
1958 goto done;
1959 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1960 if (!r) {
1961 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
1962 amdgpu_bo_kunmap(ring->mqd_obj);
1963 ring->mqd_ptr = NULL;
1964 }
1965 amdgpu_bo_unreserve(ring->mqd_obj);
1966 if (r)
1967 goto done;
1968 }
1969
1970 r = amdgpu_gfx_enable_kcq(adev, xcc_id);
1971done:
1972 return r;
1973}
1974
1975static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
1976{
1977 struct amdgpu_ring *ring;
1978 int r, j;
1979
1980 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1981
1982 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1983 gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
1984
1985 r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
1986 if (r)
1987 return r;
1988 }
1989
1990 r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
1991 if (r)
1992 return r;
1993
1994 r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
1995 if (r)
1996 return r;
1997
1998 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1999 ring = &adev->gfx.compute_ring
2000 [j + xcc_id * adev->gfx.num_compute_rings];
2001 r = amdgpu_ring_test_helper(ring);
2002 if (r)
2003 return r;
2004 }
2005
2006 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2007
2008 return 0;
2009}
2010
2011static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2012{
2013 int r = 0, i, num_xcc;
2014
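	/* On first init the compute partition mode is still unknown; switch
	 * to the user-requested mode before bringing up the CP rings.
	 */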
2015 if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2016 AMDGPU_XCP_FL_NONE) ==
2017 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2018 r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
2019 amdgpu_user_partt_mode);
2020
2021 if (r)
2022 return r;
2023
2024 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2025 for (i = 0; i < num_xcc; i++) {
2026 r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2027 if (r)
2028 return r;
2029 }
2030
2031 return 0;
2032}
2033
2034static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
2035 int xcc_id)
2036{
2037 gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
2038}
2039
2040static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2041{
2042 if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2043 DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2044
2045 if (amdgpu_sriov_vf(adev)) {
2046		/* Polling must be disabled for SRIOV once the hw is finished,
2047		 * otherwise the CPC engine may keep fetching the WB address,
2048		 * which is already invalid after sw fini, and trigger a DMAR
2049		 * read error on the hypervisor side.
2050		 */
2051 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2052 return;
2053 }
2054
2055	/* Use the deinitialization sequence from CAIL when unbinding the
2056	 * device from the driver, otherwise the KIQ hangs when binding back.
2057	 */
2058 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2059 mutex_lock(&adev->srbm_mutex);
2060 soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2061 adev->gfx.kiq[xcc_id].ring.pipe,
2062 adev->gfx.kiq[xcc_id].ring.queue, 0,
2063 GET_INST(GC, xcc_id));
2064 gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2065 xcc_id);
2066 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2067 mutex_unlock(&adev->srbm_mutex);
2068 }
2069
2070 gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2071 gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
2072}
2073
2074static int gfx_v9_4_3_hw_init(void *handle)
2075{
2076 int r;
2077 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2078
2079 if (!amdgpu_sriov_vf(adev))
2080 gfx_v9_4_3_init_golden_registers(adev);
2081
2082 gfx_v9_4_3_constants_init(adev);
2083
2084 r = adev->gfx.rlc.funcs->resume(adev);
2085 if (r)
2086 return r;
2087
2088	return gfx_v9_4_3_cp_resume(adev);
2093}
2094
2095static int gfx_v9_4_3_hw_fini(void *handle)
2096{
2097 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2098 int i, num_xcc;
2099
2100 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2101 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2102
2103 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2104	for (i = 0; i < num_xcc; i++)
2105		gfx_v9_4_3_xcc_fini(adev, i);
2107
2108 return 0;
2109}
2110
2111static int gfx_v9_4_3_suspend(void *handle)
2112{
2113 return gfx_v9_4_3_hw_fini(handle);
2114}
2115
2116static int gfx_v9_4_3_resume(void *handle)
2117{
2118 return gfx_v9_4_3_hw_init(handle);
2119}
2120
2121static bool gfx_v9_4_3_is_idle(void *handle)
2122{
2123 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2124 int i, num_xcc;
2125
2126 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2127 for (i = 0; i < num_xcc; i++) {
2128 if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2129 GRBM_STATUS, GUI_ACTIVE))
2130 return false;
2131 }
2132 return true;
2133}
2134
2135static int gfx_v9_4_3_wait_for_idle(void *handle)
2136{
2137 unsigned i;
2138 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2139
2140 for (i = 0; i < adev->usec_timeout; i++) {
2141 if (gfx_v9_4_3_is_idle(handle))
2142 return 0;
2143 udelay(1);
2144 }
2145 return -ETIMEDOUT;
2146}
2147
2148static int gfx_v9_4_3_soft_reset(void *handle)
2149{
2150 u32 grbm_soft_reset = 0;
2151 u32 tmp;
2152 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2153
2154 /* GRBM_STATUS */
2155 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2156 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2157 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2158 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2159 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2160 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2161 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2162 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2163 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2164 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2165 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2166 }
2167
2168 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2169 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2170 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2171 }
2172
2173 /* GRBM_STATUS2 */
2174 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2175 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2176 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2177 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2178
2180 if (grbm_soft_reset) {
2181 /* stop the rlc */
2182 adev->gfx.rlc.funcs->stop(adev);
2183
2184 /* Disable MEC parsing/prefetching */
2185 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2186
2188		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2189		tmp |= grbm_soft_reset;
2190		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2191		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2192		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2193
2194		udelay(50);
2195
2196		tmp &= ~grbm_soft_reset;
2197		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2198		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2200
2201 /* Wait a little for things to settle down */
2202 udelay(50);
2203 }
2204 return 0;
2205}
2206
2207static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2208 uint32_t vmid,
2209 uint32_t gds_base, uint32_t gds_size,
2210 uint32_t gws_base, uint32_t gws_size,
2211 uint32_t oa_base, uint32_t oa_size)
2212{
2213 struct amdgpu_device *adev = ring->adev;
2214
2215 /* GDS Base */
2216 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2217 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2218 gds_base);
2219
2220 /* GDS Size */
2221 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2222 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2223 gds_size);
2224
2225 /* GWS */
2226 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2227 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2228 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2229
2230 /* OA */
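	/* OA: (1 << (oa_size + oa_base)) - (1 << oa_base) builds a mask of
	 * oa_size consecutive bits starting at bit oa_base.
	 */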
2231 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2232 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2233 (1 << (oa_size + oa_base)) - (1 << oa_base));
2234}
2235
2236static int gfx_v9_4_3_early_init(void *handle)
2237{
2238 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2239
2240 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2241 AMDGPU_MAX_COMPUTE_RINGS);
2242 gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2243 gfx_v9_4_3_set_ring_funcs(adev);
2244 gfx_v9_4_3_set_irq_funcs(adev);
2245 gfx_v9_4_3_set_gds_init(adev);
2246 gfx_v9_4_3_set_rlc_funcs(adev);
2247
2248 /* init rlcg reg access ctrl */
2249 gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2250
2251 return gfx_v9_4_3_init_microcode(adev);
2252}
2253
2254static int gfx_v9_4_3_late_init(void *handle)
2255{
2256 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2257 int r;
2258
2259 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2260 if (r)
2261 return r;
2262
2263 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2264 if (r)
2265 return r;
2266
2267 if (adev->gfx.ras &&
2268 adev->gfx.ras->enable_watchdog_timer)
2269 adev->gfx.ras->enable_watchdog_timer(adev);
2270
2271 return 0;
2272}
2273
2274static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2275 bool enable, int xcc_id)
2276{
2277 uint32_t def, data;
2278
2279 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2280 return;
2281
2282 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2283 regRLC_CGTT_MGCG_OVERRIDE);
2284
2285 if (enable)
2286 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2287 else
2288 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2289
2290 if (def != data)
2291 WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2292 regRLC_CGTT_MGCG_OVERRIDE, data);
2293}
2295
2296static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2297 bool enable, int xcc_id)
2298{
2299 uint32_t def, data;
2300
2301 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2302 return;
2303
2304 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2305 regRLC_CGTT_MGCG_OVERRIDE);
2306
2307 if (enable)
2308 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2309 else
2310 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2311
2312 if (def != data)
2313 WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2314 regRLC_CGTT_MGCG_OVERRIDE, data);
2315}
2316
2317static void
2318gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2319 bool enable, int xcc_id)
2320{
2321 uint32_t data, def;
2322
2323 /* It is disabled by HW by default */
2324 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2325 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
2326 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2327
2328 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2329 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2330 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2331 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2332
2333 if (def != data)
2334 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2335
2336 /* MGLS is a global flag to control all MGLS in GFX */
2337 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2338 /* 2 - RLC memory Light sleep */
2339 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2340 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2341 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2342 if (def != data)
2343 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2344 }
2345 /* 3 - CP memory Light sleep */
2346 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2347 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2348 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2349 if (def != data)
2350 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2351 }
2352 }
2353 } else {
2354 /* 1 - MGCG_OVERRIDE */
2355 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2356
2357 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2358 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2359 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2360 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2361
2362 if (def != data)
2363 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2364
2365 /* 2 - disable MGLS in RLC */
2366 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2367 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2368 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2369 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2370 }
2371
2372 /* 3 - disable MGLS in CP */
2373 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2374 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2375 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2376 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2377 }
2378 }
2379}
2381
2382static void
2383gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2384 bool enable, int xcc_id)
2385{
2386 uint32_t def, data;
2387
2388 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2389
2390 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2391 /* unset CGCG override */
2392 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2393 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2394 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2395 else
2396 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2397 /* update CGCG and CGLS override bits */
2398 if (def != data)
2399 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2400
2401 /* enable cgcg FSM(0x0000363F) */
2402 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2403
2404 data = (0x36
2405 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2406 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2407 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2408 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2409 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2410 if (def != data)
2411 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2412
2413 /* set IDLE_POLL_COUNT(0x00900100) */
2414 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2415 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2416 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2417 if (def != data)
2418 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2419 } else {
2420 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2421 /* reset CGCG/CGLS bits */
2422 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2423 /* disable cgcg and cgls in FSM */
2424 if (def != data)
2425 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2426 }
2427}
2429
2430static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2431 bool enable, int xcc_id)
2432{
2433 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2434
2435 if (enable) {
2436 /* FGCG */
2437 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2438 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2439
2440 /* CGCG/CGLS should be enabled after MGCG/MGLS
2441 * === MGCG + MGLS ===
2442 */
2443 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2444 xcc_id);
2445 /* === CGCG + CGLS === */
2446 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2447 xcc_id);
2448 } else {
2449 /* CGCG/CGLS should be disabled before MGCG/MGLS
2450 * === CGCG + CGLS ===
2451 */
2452 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2453 xcc_id);
2454 /* === MGCG + MGLS === */
2455 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2456 xcc_id);
2457
2458 /* FGCG */
2459 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2460 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2461 }
2462
2463 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2464
2465 return 0;
2466}
2467
2468static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2469 .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2470 .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2471 .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2472 .init = gfx_v9_4_3_rlc_init,
2473 .resume = gfx_v9_4_3_rlc_resume,
2474 .stop = gfx_v9_4_3_rlc_stop,
2475 .reset = gfx_v9_4_3_rlc_reset,
2476 .start = gfx_v9_4_3_rlc_start,
2477 .update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2478 .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2479};
2480
2481static int gfx_v9_4_3_set_powergating_state(void *handle,
2482 enum amd_powergating_state state)
2483{
2484 return 0;
2485}
2486
2487static int gfx_v9_4_3_set_clockgating_state(void *handle,
2488 enum amd_clockgating_state state)
2489{
2490 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2491 int i, num_xcc;
2492
2493 if (amdgpu_sriov_vf(adev))
2494 return 0;
2495
2496 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2497 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2498 case IP_VERSION(9, 4, 3):
2499 for (i = 0; i < num_xcc; i++)
2500 gfx_v9_4_3_xcc_update_gfx_clock_gating(
2501 adev, state == AMD_CG_STATE_GATE, i);
2502 break;
2503 default:
2504 break;
2505 }
2506 return 0;
2507}
2508
2509static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2510{
2511 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2512 int data;
2513
2514 if (amdgpu_sriov_vf(adev))
2515 *flags = 0;
2516
2517 /* AMD_CG_SUPPORT_GFX_MGCG */
2518 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2519 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2520 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
2521
2522 /* AMD_CG_SUPPORT_GFX_CGCG */
2523 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2524 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2525 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
2526
2527 /* AMD_CG_SUPPORT_GFX_CGLS */
2528 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2529 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
2530
2531 /* AMD_CG_SUPPORT_GFX_RLC_LS */
2532 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2533 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2534 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2535
2536 /* AMD_CG_SUPPORT_GFX_CP_LS */
2537 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2538 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2539 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2540}
2541
2542static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2543{
2544 struct amdgpu_device *adev = ring->adev;
2545 u32 ref_and_mask, reg_mem_engine;
2546 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2547
2548 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2549 switch (ring->me) {
2550 case 1:
2551 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2552 break;
2553 case 2:
2554 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2555 break;
2556 default:
2557 return;
2558 }
2559 reg_mem_engine = 0;
2560 } else {
2561 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2562 reg_mem_engine = 1; /* pfp */
2563 }
2564
2565 gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2566 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2567 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2568 ref_and_mask, ref_and_mask, 0x20);
2569}
2570
2571static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2572 struct amdgpu_job *job,
2573 struct amdgpu_ib *ib,
2574 uint32_t flags)
2575{
2576 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2577 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2578
2579	/* Currently, there is a high probability of a wave ID mismatch
2580 * between ME and GDS, leading to a hw deadlock, because ME generates
2581 * different wave IDs than the GDS expects. This situation happens
2582 * randomly when at least 5 compute pipes use GDS ordered append.
2583 * The wave IDs generated by ME are also wrong after suspend/resume.
2584 * Those are probably bugs somewhere else in the kernel driver.
2585 *
2586 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2587 * GDS to 0 for this ring (me/pipe).
2588 */
2589 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2590 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2591 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2592 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2593 }
2594
2595 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2596 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2597 amdgpu_ring_write(ring,
2598#ifdef __BIG_ENDIAN
2599 (2 << 0) |
2600#endif
2601 lower_32_bits(ib->gpu_addr));
2602 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2603 amdgpu_ring_write(ring, control);
2604}
2605
2606static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2607 u64 seq, unsigned flags)
2608{
2609 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2610 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2611 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2612
2613 /* RELEASE_MEM - flush caches, send int */
2614 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2615 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2616 EOP_TC_NC_ACTION_EN) :
2617 (EOP_TCL1_ACTION_EN |
2618 EOP_TC_ACTION_EN |
2619 EOP_TC_WB_ACTION_EN |
2620 EOP_TC_MD_ACTION_EN)) |
2621 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2622 EVENT_INDEX(5)));
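	/* DATA_SEL: 1 = send low 32-bit data, 2 = send 64-bit data;
	 * INT_SEL: 2 = send an interrupt when the data write is confirmed.
	 */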
2623 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2624
2625	/*
2626	 * The address must be Qword-aligned for a 64-bit write, and
2627	 * Dword-aligned when only the low 32 bits are sent (high discarded).
2628	 */
2629 if (write64bit)
2630 BUG_ON(addr & 0x7);
2631 else
2632 BUG_ON(addr & 0x3);
2633 amdgpu_ring_write(ring, lower_32_bits(addr));
2634 amdgpu_ring_write(ring, upper_32_bits(addr));
2635 amdgpu_ring_write(ring, lower_32_bits(seq));
2636 amdgpu_ring_write(ring, upper_32_bits(seq));
2637 amdgpu_ring_write(ring, 0);
2638}
2639
2640static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2641{
2642 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2643 uint32_t seq = ring->fence_drv.sync_seq;
2644 uint64_t addr = ring->fence_drv.gpu_addr;
2645
2646 gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2647 lower_32_bits(addr), upper_32_bits(addr),
2648 seq, 0xffffffff, 4);
2649}
2650
2651static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2652 unsigned vmid, uint64_t pd_addr)
2653{
2654 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2655}
2656
2657static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2658{
2659 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2660}
2661
2662static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2663{
2664 u64 wptr;
2665
2666 /* XXX check if swapping is necessary on BE */
2667 if (ring->use_doorbell)
2668 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2669 else
2670 BUG();
2671 return wptr;
2672}
2673
2674static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2675{
2676 struct amdgpu_device *adev = ring->adev;
2677
2678 /* XXX check if swapping is necessary on BE */
2679 if (ring->use_doorbell) {
2680 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2681 WDOORBELL64(ring->doorbell_index, ring->wptr);
2682 } else {
2683 BUG(); /* only DOORBELL method supported on gfx9 now */
2684 }
2685}
2686
2687static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2688 u64 seq, unsigned int flags)
2689{
2690 struct amdgpu_device *adev = ring->adev;
2691
2692 /* we only allocate 32bit for each seq wb address */
2693 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2694
2695 /* write fence seq to the "addr" */
2696 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2697 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2698 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2699 amdgpu_ring_write(ring, lower_32_bits(addr));
2700 amdgpu_ring_write(ring, upper_32_bits(addr));
2701 amdgpu_ring_write(ring, lower_32_bits(seq));
2702
2703 if (flags & AMDGPU_FENCE_FLAG_INT) {
2704 /* set register to trigger INT */
2705 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2706 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2707 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2708 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2709 amdgpu_ring_write(ring, 0);
2710 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2711 }
2712}
2713
2714static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2715 uint32_t reg_val_offs)
2716{
2717 struct amdgpu_device *adev = ring->adev;
2718
2719 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2720	amdgpu_ring_write(ring, 0 |	/* src: register */
2721 (5 << 8) | /* dst: memory */
2722 (1 << 20)); /* write confirm */
2723 amdgpu_ring_write(ring, reg);
2724 amdgpu_ring_write(ring, 0);
2725 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
2726 reg_val_offs * 4));
2727 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
2728 reg_val_offs * 4));
2729}
2730
2731static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
2732 uint32_t val)
2733{
2734 uint32_t cmd = 0;
2735
2736 switch (ring->funcs->type) {
2737 case AMDGPU_RING_TYPE_GFX:
2738 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
2739 break;
2740 case AMDGPU_RING_TYPE_KIQ:
2741 cmd = (1 << 16); /* no inc addr */
2742 break;
2743 default:
2744 cmd = WR_CONFIRM;
2745 break;
2746 }
2747 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2748 amdgpu_ring_write(ring, cmd);
2749 amdgpu_ring_write(ring, reg);
2750 amdgpu_ring_write(ring, 0);
2751 amdgpu_ring_write(ring, val);
2752}
2753
2754static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
2755 uint32_t val, uint32_t mask)
2756{
2757 gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
2758}
2759
2760static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
2761 uint32_t reg0, uint32_t reg1,
2762 uint32_t ref, uint32_t mask)
2763{
2764 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
2765 ref, mask);
2766}
2767
2768static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2769 struct amdgpu_device *adev, int me, int pipe,
2770 enum amdgpu_interrupt_state state, int xcc_id)
2771{
2772 u32 mec_int_cntl, mec_int_cntl_reg;
2773
2774 /*
2775 * amdgpu controls only the first MEC. That's why this function only
2776 * handles the setting of interrupts for this specific MEC. All other
2777 * pipes' interrupts are set by amdkfd.
2778 */
2779
2780 if (me == 1) {
2781 switch (pipe) {
2782 case 0:
2783 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
2784 break;
2785 case 1:
2786 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
2787 break;
2788 case 2:
2789 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
2790 break;
2791 case 3:
2792 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
2793 break;
2794 default:
2795 DRM_DEBUG("invalid pipe %d\n", pipe);
2796 return;
2797 }
2798 } else {
2799 DRM_DEBUG("invalid me %d\n", me);
2800 return;
2801 }
2802
2803 switch (state) {
2804 case AMDGPU_IRQ_STATE_DISABLE:
2805 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
2806 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2807 TIME_STAMP_INT_ENABLE, 0);
2808 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
2809 break;
2810 case AMDGPU_IRQ_STATE_ENABLE:
2811 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
2812 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2813 TIME_STAMP_INT_ENABLE, 1);
2814 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
2815 break;
2816 default:
2817 break;
2818 }
2819}
2820
2821static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
2822 struct amdgpu_irq_src *source,
2823 unsigned type,
2824 enum amdgpu_interrupt_state state)
2825{
2826 int i, num_xcc;
2827
2828 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2829 switch (state) {
2830 case AMDGPU_IRQ_STATE_DISABLE:
2831 case AMDGPU_IRQ_STATE_ENABLE:
2832 for (i = 0; i < num_xcc; i++)
2833 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2834 PRIV_REG_INT_ENABLE,
2835 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2836 break;
2837 default:
2838 break;
2839 }
2840
2841 return 0;
2842}
2843
2844static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
2845 struct amdgpu_irq_src *source,
2846 unsigned type,
2847 enum amdgpu_interrupt_state state)
2848{
2849 int i, num_xcc;
2850
2851 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2852 switch (state) {
2853 case AMDGPU_IRQ_STATE_DISABLE:
2854 case AMDGPU_IRQ_STATE_ENABLE:
2855 for (i = 0; i < num_xcc; i++)
2856 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2857 PRIV_INSTR_INT_ENABLE,
2858 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2859 break;
2860 default:
2861 break;
2862 }
2863
2864 return 0;
2865}
2866
2867static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
2868 struct amdgpu_irq_src *src,
2869 unsigned type,
2870 enum amdgpu_interrupt_state state)
2871{
2872 int i, num_xcc;
2873
2874 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2875 for (i = 0; i < num_xcc; i++) {
2876 switch (type) {
2877 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
2878 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2879 adev, 1, 0, state, i);
2880 break;
2881 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
2882 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2883 adev, 1, 1, state, i);
2884 break;
2885 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
2886 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2887 adev, 1, 2, state, i);
2888 break;
2889 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
2890 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2891 adev, 1, 3, state, i);
2892 break;
2893 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
2894 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2895 adev, 2, 0, state, i);
2896 break;
2897 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
2898 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2899 adev, 2, 1, state, i);
2900 break;
2901 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
2902 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2903 adev, 2, 2, state, i);
2904 break;
2905 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
2906 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2907 adev, 2, 3, state, i);
2908 break;
2909 default:
2910 break;
2911 }
2912 }
2913
2914 return 0;
2915}
2916
2917static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
2918 struct amdgpu_irq_src *source,
2919 struct amdgpu_iv_entry *entry)
2920{
2921 int i, xcc_id;
2922 u8 me_id, pipe_id, queue_id;
2923 struct amdgpu_ring *ring;
2924
2925 DRM_DEBUG("IH: CP EOP\n");
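	/* ring_id bitfield layout (as decoded below):
	 * [1:0] = pipe, [3:2] = me, [6:4] = queue.
	 */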
2926 me_id = (entry->ring_id & 0x0c) >> 2;
2927 pipe_id = (entry->ring_id & 0x03) >> 0;
2928 queue_id = (entry->ring_id & 0x70) >> 4;
2929
2930 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2931
2932 if (xcc_id == -EINVAL)
2933 return -EINVAL;
2934
2935 switch (me_id) {
2936 case 0:
2937 case 1:
2938 case 2:
2939 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2940 ring = &adev->gfx.compute_ring
2941 [i +
2942 xcc_id * adev->gfx.num_compute_rings];
2943 /* Per-queue interrupt is supported for MEC starting from VI.
2944 * The interrupt can only be enabled/disabled per pipe instead of per queue.
2945 */
2946
2947 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
2948 amdgpu_fence_process(ring);
2949 }
2950 break;
2951 }
2952 return 0;
2953}
2954
2955static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
2956 struct amdgpu_iv_entry *entry)
2957{
2958 u8 me_id, pipe_id, queue_id;
2959 struct amdgpu_ring *ring;
2960 int i, xcc_id;
2961
2962 me_id = (entry->ring_id & 0x0c) >> 2;
2963 pipe_id = (entry->ring_id & 0x03) >> 0;
2964 queue_id = (entry->ring_id & 0x70) >> 4;
2965
2966 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2967
2968 if (xcc_id == -EINVAL)
2969 return;
2970
2971 switch (me_id) {
2972 case 0:
2973 case 1:
2974 case 2:
2975 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2976 ring = &adev->gfx.compute_ring
2977 [i +
2978 xcc_id * adev->gfx.num_compute_rings];
2979 if (ring->me == me_id && ring->pipe == pipe_id &&
2980 ring->queue == queue_id)
2981 drm_sched_fault(&ring->sched);
2982 }
2983 break;
2984 }
2985}
2986
2987static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
2988 struct amdgpu_irq_src *source,
2989 struct amdgpu_iv_entry *entry)
2990{
2991 DRM_ERROR("Illegal register access in command stream\n");
2992 gfx_v9_4_3_fault(adev, entry);
2993 return 0;
2994}
2995
2996static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
2997 struct amdgpu_irq_src *source,
2998 struct amdgpu_iv_entry *entry)
2999{
3000 DRM_ERROR("Illegal instruction in command stream\n");
3001 gfx_v9_4_3_fault(adev, entry);
3002 return 0;
3003}
3004
3005static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3006{
3007 const unsigned int cp_coher_cntl =
3008 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3009 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3010 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3011 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3012 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3013
3014	/* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
3015 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3016 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3017 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
3018 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
3019 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3020 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
3021 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3022}
3023
3024static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3025 uint32_t pipe, bool enable)
3026{
3027 struct amdgpu_device *adev = ring->adev;
3028 uint32_t val;
3029 uint32_t wcl_cs_reg;
3030
3031	/* the regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
3032 val = enable ? 0x1 : 0x7f;
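	/* SPI_WCL_PIPE_PERCENT_CS* are 7-bit multipliers: 0x7f means no
	 * throttling, while 0x1 restricts the pipe to a small fraction
	 * (roughly 1/127) of its wave slots while the limit is active.
	 */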
3033
3034 switch (pipe) {
3035 case 0:
3036 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3037 break;
3038 case 1:
3039 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3040 break;
3041 case 2:
3042 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3043 break;
3044 case 3:
3045 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3046 break;
3047 default:
3048 DRM_DEBUG("invalid pipe %d\n", pipe);
3049 return;
3050 }
3051
3052 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3053}
3054
3055static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3056{
3057 struct amdgpu_device *adev = ring->adev;
3058 uint32_t val;
3059 int i;
3060
3061	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
3062	 * limit the number of gfx waves. Setting it to 5 bits (0x1f) makes
3063	 * sure gfx gets only around 25% of the gpu resources.
3064	 */
3065 val = enable ? 0x1f : 0x07ffffff;
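	/* 0x1f out of the full 7-bit range 0x7f is ~24%, hence "around 25%";
	 * only the low 7 bits of the disable value 0x07ffffff should matter,
	 * with the extra bits presumably ignored by the hardware.
	 */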
3066 amdgpu_ring_emit_wreg(ring,
3067 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3068 val);
3069
3070	/* Restrict waves for normal/low priority compute queues as well,
3071	 * to get the best QoS for high priority compute jobs.
3072	 *
3073	 * amdgpu controls only the first ME (CS pipes 0-3).
3074	 */
3075 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3076 if (i != ring->pipe)
3077 gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3078	}
3080}
3081
enum amdgpu_gfx_cp_ras_mem_id {
	AMDGPU_GFX_CP_MEM1 = 1,
	AMDGPU_GFX_CP_MEM2,
	AMDGPU_GFX_CP_MEM3,
	AMDGPU_GFX_CP_MEM4,
	AMDGPU_GFX_CP_MEM5,
};

enum amdgpu_gfx_gcea_ras_mem_id {
	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
	AMDGPU_GFX_GCEA_IORD_CMDMEM,
	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
	AMDGPU_GFX_GCEA_MAM_DMEM0,
	AMDGPU_GFX_GCEA_MAM_DMEM1,
	AMDGPU_GFX_GCEA_MAM_DMEM2,
	AMDGPU_GFX_GCEA_MAM_DMEM3,
	AMDGPU_GFX_GCEA_MAM_AMEM0,
	AMDGPU_GFX_GCEA_MAM_AMEM1,
	AMDGPU_GFX_GCEA_MAM_AMEM2,
	AMDGPU_GFX_GCEA_MAM_AMEM3,
	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
	AMDGPU_GFX_GCEA_WRET_TAGMEM,
	AMDGPU_GFX_GCEA_RRET_TAGMEM,
	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
};

enum amdgpu_gfx_gc_cane_ras_mem_id {
	AMDGPU_GFX_GC_CANE_MEM0 = 0,
};

enum amdgpu_gfx_gcutcl2_ras_mem_id {
	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
};

enum amdgpu_gfx_gds_ras_mem_id {
	AMDGPU_GFX_GDS_MEM0 = 0,
};

enum amdgpu_gfx_lds_ras_mem_id {
	AMDGPU_GFX_LDS_BANK0 = 0,
	AMDGPU_GFX_LDS_BANK1,
	AMDGPU_GFX_LDS_BANK2,
	AMDGPU_GFX_LDS_BANK3,
	AMDGPU_GFX_LDS_BANK4,
	AMDGPU_GFX_LDS_BANK5,
	AMDGPU_GFX_LDS_BANK6,
	AMDGPU_GFX_LDS_BANK7,
	AMDGPU_GFX_LDS_BANK8,
	AMDGPU_GFX_LDS_BANK9,
	AMDGPU_GFX_LDS_BANK10,
	AMDGPU_GFX_LDS_BANK11,
	AMDGPU_GFX_LDS_BANK12,
	AMDGPU_GFX_LDS_BANK13,
	AMDGPU_GFX_LDS_BANK14,
	AMDGPU_GFX_LDS_BANK15,
	AMDGPU_GFX_LDS_BANK16,
	AMDGPU_GFX_LDS_BANK17,
	AMDGPU_GFX_LDS_BANK18,
	AMDGPU_GFX_LDS_BANK19,
	AMDGPU_GFX_LDS_BANK20,
	AMDGPU_GFX_LDS_BANK21,
	AMDGPU_GFX_LDS_BANK22,
	AMDGPU_GFX_LDS_BANK23,
	AMDGPU_GFX_LDS_BANK24,
	AMDGPU_GFX_LDS_BANK25,
	AMDGPU_GFX_LDS_BANK26,
	AMDGPU_GFX_LDS_BANK27,
	AMDGPU_GFX_LDS_BANK28,
	AMDGPU_GFX_LDS_BANK29,
	AMDGPU_GFX_LDS_BANK30,
	AMDGPU_GFX_LDS_BANK31,
	AMDGPU_GFX_LDS_SP_BUFFER_A,
	AMDGPU_GFX_LDS_SP_BUFFER_B,
};

enum amdgpu_gfx_rlc_ras_mem_id {
	AMDGPU_GFX_RLC_GPMF32 = 1,
	AMDGPU_GFX_RLC_RLCVF32,
	AMDGPU_GFX_RLC_SCRATCH,
	AMDGPU_GFX_RLC_SRM_ARAM,
	AMDGPU_GFX_RLC_SRM_DRAM,
	AMDGPU_GFX_RLC_TCTAG,
	AMDGPU_GFX_RLC_SPM_SE,
	AMDGPU_GFX_RLC_SPM_GRBMT,
};

enum amdgpu_gfx_sp_ras_mem_id {
	AMDGPU_GFX_SP_SIMDID0 = 0,
};

enum amdgpu_gfx_spi_ras_mem_id {
	AMDGPU_GFX_SPI_MEM0 = 0,
	AMDGPU_GFX_SPI_MEM1,
	AMDGPU_GFX_SPI_MEM2,
	AMDGPU_GFX_SPI_MEM3,
};

enum amdgpu_gfx_sqc_ras_mem_id {
	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
};

enum amdgpu_gfx_sq_ras_mem_id {
	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
	AMDGPU_GFX_SQ_SGPR_MEM1,
	AMDGPU_GFX_SQ_SGPR_MEM2,
	AMDGPU_GFX_SQ_SGPR_MEM3,
};

enum amdgpu_gfx_ta_ras_mem_id {
	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
	AMDGPU_GFX_TA_FS_CFIFO_RAM,
	AMDGPU_GFX_TA_FSX_LFIFO,
	AMDGPU_GFX_TA_FS_DFIFO_RAM,
};

enum amdgpu_gfx_tcc_ras_mem_id {
	AMDGPU_GFX_TCC_MEM1 = 1,
};

enum amdgpu_gfx_tca_ras_mem_id {
	AMDGPU_GFX_TCA_MEM1 = 1,
};

enum amdgpu_gfx_tci_ras_mem_id {
	AMDGPU_GFX_TCIW_MEM = 1,
};

enum amdgpu_gfx_tcp_ras_mem_id {
	AMDGPU_GFX_TCP_LFIFO0 = 1,
	AMDGPU_GFX_TCP_SET0BANK0_RAM,
	AMDGPU_GFX_TCP_SET0BANK1_RAM,
	AMDGPU_GFX_TCP_SET0BANK2_RAM,
	AMDGPU_GFX_TCP_SET0BANK3_RAM,
	AMDGPU_GFX_TCP_SET1BANK0_RAM,
	AMDGPU_GFX_TCP_SET1BANK1_RAM,
	AMDGPU_GFX_TCP_SET1BANK2_RAM,
	AMDGPU_GFX_TCP_SET1BANK3_RAM,
	AMDGPU_GFX_TCP_SET2BANK0_RAM,
	AMDGPU_GFX_TCP_SET2BANK1_RAM,
	AMDGPU_GFX_TCP_SET2BANK2_RAM,
	AMDGPU_GFX_TCP_SET2BANK3_RAM,
	AMDGPU_GFX_TCP_SET3BANK0_RAM,
	AMDGPU_GFX_TCP_SET3BANK1_RAM,
	AMDGPU_GFX_TCP_SET3BANK2_RAM,
	AMDGPU_GFX_TCP_SET3BANK3_RAM,
	AMDGPU_GFX_TCP_VM_FIFO,
	AMDGPU_GFX_TCP_DB_TAGRAM0,
	AMDGPU_GFX_TCP_DB_TAGRAM1,
	AMDGPU_GFX_TCP_DB_TAGRAM2,
	AMDGPU_GFX_TCP_DB_TAGRAM3,
	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
	AMDGPU_GFX_TCP_CMD_FIFO,
};

enum amdgpu_gfx_td_ras_mem_id {
	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
};

enum amdgpu_gfx_tcx_ras_mem_id {
	AMDGPU_GFX_TCX_FIFOD0 = 0,
	AMDGPU_GFX_TCX_FIFOD1,
	AMDGPU_GFX_TCX_FIFOD2,
	AMDGPU_GFX_TCX_FIFOD3,
	AMDGPU_GFX_TCX_FIFOD4,
	AMDGPU_GFX_TCX_FIFOD5,
	AMDGPU_GFX_TCX_FIFOD6,
	AMDGPU_GFX_TCX_FIFOD7,
	AMDGPU_GFX_TCX_FIFOB0,
	AMDGPU_GFX_TCX_FIFOB1,
	AMDGPU_GFX_TCX_FIFOB2,
	AMDGPU_GFX_TCX_FIFOB3,
	AMDGPU_GFX_TCX_FIFOB4,
	AMDGPU_GFX_TCX_FIFOB5,
	AMDGPU_GFX_TCX_FIFOB6,
	AMDGPU_GFX_TCX_FIFOB7,
	AMDGPU_GFX_TCX_FIFOA0,
	AMDGPU_GFX_TCX_FIFOA1,
	AMDGPU_GFX_TCX_FIFOA2,
	AMDGPU_GFX_TCX_FIFOA3,
	AMDGPU_GFX_TCX_FIFOA4,
	AMDGPU_GFX_TCX_FIFOA5,
	AMDGPU_GFX_TCX_FIFOA6,
	AMDGPU_GFX_TCX_FIFOA7,
	AMDGPU_GFX_TCX_CFIFO0,
	AMDGPU_GFX_TCX_CFIFO1,
	AMDGPU_GFX_TCX_CFIFO2,
	AMDGPU_GFX_TCX_CFIFO3,
	AMDGPU_GFX_TCX_CFIFO4,
	AMDGPU_GFX_TCX_CFIFO5,
	AMDGPU_GFX_TCX_CFIFO6,
	AMDGPU_GFX_TCX_CFIFO7,
	AMDGPU_GFX_TCX_FIFO_ACKB0,
	AMDGPU_GFX_TCX_FIFO_ACKB1,
	AMDGPU_GFX_TCX_FIFO_ACKB2,
	AMDGPU_GFX_TCX_FIFO_ACKB3,
	AMDGPU_GFX_TCX_FIFO_ACKB4,
	AMDGPU_GFX_TCX_FIFO_ACKB5,
	AMDGPU_GFX_TCX_FIFO_ACKB6,
	AMDGPU_GFX_TCX_FIFO_ACKB7,
	AMDGPU_GFX_TCX_FIFO_ACKD0,
	AMDGPU_GFX_TCX_FIFO_ACKD1,
	AMDGPU_GFX_TCX_FIFO_ACKD2,
	AMDGPU_GFX_TCX_FIFO_ACKD3,
	AMDGPU_GFX_TCX_FIFO_ACKD4,
	AMDGPU_GFX_TCX_FIFO_ACKD5,
	AMDGPU_GFX_TCX_FIFO_ACKD6,
	AMDGPU_GFX_TCX_FIFO_ACKD7,
	AMDGPU_GFX_TCX_DST_FIFOA0,
	AMDGPU_GFX_TCX_DST_FIFOA1,
	AMDGPU_GFX_TCX_DST_FIFOA2,
	AMDGPU_GFX_TCX_DST_FIFOA3,
	AMDGPU_GFX_TCX_DST_FIFOA4,
	AMDGPU_GFX_TCX_DST_FIFOA5,
	AMDGPU_GFX_TCX_DST_FIFOA6,
	AMDGPU_GFX_TCX_DST_FIFOA7,
	AMDGPU_GFX_TCX_DST_FIFOB0,
	AMDGPU_GFX_TCX_DST_FIFOB1,
	AMDGPU_GFX_TCX_DST_FIFOB2,
	AMDGPU_GFX_TCX_DST_FIFOB3,
	AMDGPU_GFX_TCX_DST_FIFOB4,
	AMDGPU_GFX_TCX_DST_FIFOB5,
	AMDGPU_GFX_TCX_DST_FIFOB6,
	AMDGPU_GFX_TCX_DST_FIFOB7,
	AMDGPU_GFX_TCX_DST_FIFOD0,
	AMDGPU_GFX_TCX_DST_FIFOD1,
	AMDGPU_GFX_TCX_DST_FIFOD2,
	AMDGPU_GFX_TCX_DST_FIFOD3,
	AMDGPU_GFX_TCX_DST_FIFOD4,
	AMDGPU_GFX_TCX_DST_FIFOD5,
	AMDGPU_GFX_TCX_DST_FIFOD6,
	AMDGPU_GFX_TCX_DST_FIFOD7,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
};

enum amdgpu_gfx_atc_l2_ras_mem_id {
	AMDGPU_GFX_ATC_L2_MEM0 = 0,
};

enum amdgpu_gfx_utcl2_ras_mem_id {
	AMDGPU_GFX_UTCL2_MEM0 = 0,
};

enum amdgpu_gfx_vml2_ras_mem_id {
	AMDGPU_GFX_VML2_MEM0 = 0,
};

enum amdgpu_gfx_vml2_walker_ras_mem_id {
	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
};

static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
};

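/*
 * Error status register lists: each entry pairs the CE/UE status register
 * pair (LO/HI) with its register instance count, validity flags and block
 * name, followed by the memory ID table used to decode the failing memory
 * and the number of shader engines to iterate (se_num); see the query and
 * reset loops below for how these fields drive the GRBM index selection.
 */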
static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
	AMDGPU_GFX_RLC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
	AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
	AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
	AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
	AMDGPU_GFX_GDS_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
	AMDGPU_GFX_GC_CANE_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
	AMDGPU_GFX_SPI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
	AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
	AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
	AMDGPU_GFX_SQ_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
	5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
	AMDGPU_GFX_SQC_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
	2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
	AMDGPU_GFX_TCX_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
	16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
	AMDGPU_GFX_TCC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
	AMDGPU_GFX_TA_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
	27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
	AMDGPU_GFX_TCI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
	AMDGPU_GFX_TCP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
	AMDGPU_GFX_TD_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
	16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
	AMDGPU_GFX_GCEA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
	AMDGPU_GFX_LDS_MEM, 4},
};

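/*
 * Note: the UE list carries more entries than the CE list above (e.g. TCA
 * only reports uncorrectable errors); the query/reset helpers below pick
 * up the surplus UE entries in a separate "extra register entries" pass.
 */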
static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
	AMDGPU_GFX_RLC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
	AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
	AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
	AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
	AMDGPU_GFX_GDS_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
	AMDGPU_GFX_GC_CANE_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
	AMDGPU_GFX_SPI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
	AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
	AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
	AMDGPU_GFX_SQ_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
	5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
	AMDGPU_GFX_SQC_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
	2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
	AMDGPU_GFX_TCX_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
	16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
	AMDGPU_GFX_TCC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
	AMDGPU_GFX_TA_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
	27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
	AMDGPU_GFX_TCI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
	AMDGPU_GFX_TCP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
	AMDGPU_GFX_TD_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
	2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
	AMDGPU_GFX_TCA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
	16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
	AMDGPU_GFX_GCEA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
	10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
	AMDGPU_GFX_LDS_MEM, 4},
};

static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	unsigned long ce_count = 0, ue_count = 0;
	uint32_t i, j, k;

	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = xcc_id & 0x01 ? 1 : 0,
	};

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}

	/* handle extra register entries of UE */
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}

	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* The caller is expected to initialize err_data->ue_count and
	 * err_data->ce_count before calling this function.
	 */
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
}

static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i, j, k;

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	/* handle extra register entries of UE */
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

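/*
 * SQ watchdog setup: TIMEOUT_FATAL_DISABLE and PERIOD_SEL are programmed
 * once and then written to every shader engine of the XCC via GRBM index
 * selection. Not applicable under SR-IOV, where the function returns early.
 */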
static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i;
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);

	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
	    (amdgpu_watchdog_timer.period < 1 ||
	     amdgpu_watchdog_timer.period > 0x23)) {
		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
		amdgpu_watchdog_timer.period = 0x23;
	}
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
			     amdgpu_watchdog_timer.period);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  gfx_v9_4_3_inst_query_ras_err_count);
}

static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
}

static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}

static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
	.name = "gfx_v9_4_3",
	.early_init = gfx_v9_4_3_early_init,
	.late_init = gfx_v9_4_3_late_init,
	.sw_init = gfx_v9_4_3_sw_init,
	.sw_fini = gfx_v9_4_3_sw_fini,
	.hw_init = gfx_v9_4_3_hw_init,
	.hw_fini = gfx_v9_4_3_hw_fini,
	.suspend = gfx_v9_4_3_suspend,
	.resume = gfx_v9_4_3_resume,
	.is_idle = gfx_v9_4_3_is_idle,
	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
	.soft_reset = gfx_v9_4_3_soft_reset,
	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_4_3_emit_mem_sync */
		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
		15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
	.emit_fence = gfx_v9_4_3_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.test_ib = gfx_v9_4_3_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
};

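/*
 * KIQ ring: unlike the compute rings above, it exposes emit_rreg for
 * indirect register reads and omits the IB submission and VM flush hooks;
 * it services queue management and register access rather than user IBs.
 */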
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
};

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;

		/* compute rings are laid out contiguously, one block per XCC */
		for (j = 0; j < adev->gfx.num_compute_rings; j++)
			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
					= &gfx_v9_4_3_ring_funcs_compute;
	}
}

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
	.set = gfx_v9_4_3_set_eop_interrupt_state,
	.process = gfx_v9_4_3_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_reg_fault_state,
	.process = gfx_v9_4_3_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_inst_fault_state,
	.process = gfx_v9_4_3_priv_inst_irq,
};

static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}

static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
}

static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		/* 9.4.3 removed all the GDS internal memory;
		 * only GWS opcodes such as barrier and semaphore
		 * are supported in the kernel.
		 */
		adev->gds.gds_size = 0;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		/* deprecated for 9.4.3, no usage at all */
		adev->gds.gds_compute_max_wave_id = 0;
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						   u32 bitmap, int xcc_id)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
	u32 data, mask;

	/* the inactive CU mask is the union of the hardware (CC) and the
	 * user (GC_USER) configuration; the active bitmap is its
	 * complement within max_cu_per_sh.
	 */
	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 4];

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * The 16 comes from the 4 * 4 bitmap array size, which covers
	 * all gfx9 ASICs.
	 */
	if (adev->gfx.config.max_shader_engines *
		adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
				ao_bitmap = 0;
				counter = 0;
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask) {
						if (counter < adev->gfx.config.max_cu_per_sh)
							ao_bitmap |= mask;
						counter++;
					}
					mask <<= 1;
				}
				active_cu_number += counter;
				if (i < 2 && j < 2)
					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 4,
	.rev = 3,
	.funcs = &gfx_v9_4_3_ip_funcs,
};

static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO: initialize golden regs */
	/* gfx_v9_4_3_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v9_4_3_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
	.suspend = &gfx_v9_4_3_xcp_suspend,
	.resume = &gfx_v9_4_3_xcp_resume
};

struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};

static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
	.ras_block = {
		.hw_ops = &gfx_v9_4_3_ras_ops,
		.ras_late_init = &gfx_v9_4_3_ras_late_init,
	},
	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};