1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Xiangliang.Yu@amd.com
23 */
24
25#include "amdgpu.h"
26#include "vi.h"
27#include "bif/bif_5_0_d.h"
28#include "bif/bif_5_0_sh_mask.h"
29#include "vid.h"
30#include "gca/gfx_8_0_d.h"
31#include "gca/gfx_8_0_sh_mask.h"
32#include "gmc_v8_0.h"
33#include "gfx_v8_0.h"
34#include "sdma_v3_0.h"
35#include "tonga_ih.h"
36#include "gmc/gmc_8_2_d.h"
37#include "gmc/gmc_8_2_sh_mask.h"
38#include "oss/oss_3_0_d.h"
39#include "oss/oss_3_0_sh_mask.h"
40#include "dce/dce_10_0_d.h"
41#include "dce/dce_10_0_sh_mask.h"
42#include "smu/smu_7_1_3_d.h"
43#include "mxgpu_vi.h"
44
45#include "amdgpu_reset.h"
46
47/* VI golden setting */
/* Fiji VF MGCG/CGCG defaults: { register offset, mask, value } triples,
 * applied via amdgpu_device_program_register_sequence(). */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
97
/* Fiji VF golden settings (a10 revision): { register offset, mask, value }. */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
125
/* Fiji VF common golden registers (all revisions): { offset, mask, value }. */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
138
/* Tonga VF MGCG/CGCG defaults: { register offset, mask, value } triples,
 * applied via amdgpu_device_program_register_sequence(). Includes per-CU
 * (CU0..CU7) CGTS settings not present in the Fiji table. */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
228
/* Tonga VF golden settings (a11 revision): { register offset, mask, value }. */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
268
/* Tonga VF common golden registers (all revisions): { offset, mask, value }. */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
278
279void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
280{
281 switch (adev->asic_type) {
282 case CHIP_FIJI:
283 amdgpu_device_program_register_sequence(adev,
284 xgpu_fiji_mgcg_cgcg_init,
285 ARRAY_SIZE(
286 xgpu_fiji_mgcg_cgcg_init));
287 amdgpu_device_program_register_sequence(adev,
288 xgpu_fiji_golden_settings_a10,
289 ARRAY_SIZE(
290 xgpu_fiji_golden_settings_a10));
291 amdgpu_device_program_register_sequence(adev,
292 xgpu_fiji_golden_common_all,
293 ARRAY_SIZE(
294 xgpu_fiji_golden_common_all));
295 break;
296 case CHIP_TONGA:
297 amdgpu_device_program_register_sequence(adev,
298 xgpu_tonga_mgcg_cgcg_init,
299 ARRAY_SIZE(
300 xgpu_tonga_mgcg_cgcg_init));
301 amdgpu_device_program_register_sequence(adev,
302 xgpu_tonga_golden_settings_a11,
303 ARRAY_SIZE(
304 xgpu_tonga_golden_settings_a11));
305 amdgpu_device_program_register_sequence(adev,
306 xgpu_tonga_golden_common_all,
307 ARRAY_SIZE(
308 xgpu_tonga_golden_common_all));
309 break;
310 default:
311 BUG_ON("Doesn't support chip type.\n");
312 break;
313 }
314}
315
316/*
317 * Mailbox communication between GPU hypervisor and VFs
318 */
319static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
320{
321 u32 reg;
322 int timeout = VI_MAILBOX_TIMEDOUT;
323 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
324
325 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
326 reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
327 WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
328
329 /*Wait for RCV_MSG_VALID to be 0*/
330 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
331 while (reg & mask) {
332 if (timeout <= 0) {
333 pr_err("RCV_MSG_VALID is not cleared\n");
334 break;
335 }
336 mdelay(1);
337 timeout -= 1;
338
339 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
340 }
341}
342
343static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
344{
345 u32 reg;
346
347 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
348 reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
349 TRN_MSG_VALID, val ? 1 : 0);
350 WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
351}
352
353static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
354 enum idh_request req)
355{
356 u32 reg;
357
358 reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
359 reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
360 MSGBUF_DATA, req);
361 WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
362
363 xgpu_vi_mailbox_set_valid(adev, true);
364}
365
/*
 * Check whether the host has posted the expected mailbox event; if so,
 * acknowledge it. Returns 0 when @event is pending, -ENOENT otherwise.
 */
static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);

	/* workaround: host driver doesn't set VALID for CMPL now */
	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
		if (!(reg & mask))
			return -ENOENT;
	}

	/* DW0 of the receive buffer carries the event code. */
	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	/* send ack to PF */
	xgpu_vi_mailbox_send_ack(adev);

	return 0;
}
388
389static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
390{
391 int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
392 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
393 u32 reg;
394
395 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
396 while (!(reg & mask)) {
397 if (timeout <= 0) {
398 pr_err("Doesn't get ack from pf.\n");
399 r = -ETIME;
400 break;
401 }
402 mdelay(5);
403 timeout -= 5;
404
405 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
406 }
407
408 return r;
409}
410
411static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
412{
413 int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
414
415 r = xgpu_vi_mailbox_rcv_msg(adev, event);
416 while (r) {
417 if (timeout <= 0) {
418 pr_err("Doesn't get ack from pf.\n");
419 r = -ETIME;
420 break;
421 }
422 mdelay(5);
423 timeout -= 5;
424
425 r = xgpu_vi_mailbox_rcv_msg(adev, event);
426 }
427
428 return r;
429}
430
431static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
432 enum idh_request request)
433{
434 int r;
435
436 xgpu_vi_mailbox_trans_msg(adev, request);
437
438 /* start to poll ack */
439 r = xgpu_vi_poll_ack(adev);
440 if (r)
441 return r;
442
443 xgpu_vi_mailbox_set_valid(adev, false);
444
445 /* start to check msg if request is idh_req_gpu_init_access */
446 if (request == IDH_REQ_GPU_INIT_ACCESS ||
447 request == IDH_REQ_GPU_FINI_ACCESS ||
448 request == IDH_REQ_GPU_RESET_ACCESS) {
449 r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
450 if (r) {
451 pr_err("Doesn't get ack from pf, give up\n");
452 return r;
453 }
454 }
455
456 return 0;
457}
458
/* Ask the host to reset this VF's GPU resources. */
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
463
/* Block (polling) until the host signals FLR completion. */
static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
{
	return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
}
468
469static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
470 bool init)
471{
472 enum idh_request req;
473
474 req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
475 return xgpu_vi_send_access_requests(adev, req);
476}
477
478static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
479 bool init)
480{
481 enum idh_request req;
482 int r = 0;
483
484 req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
485 r = xgpu_vi_send_access_requests(adev, req);
486
487 return r;
488}
489
490/* add support mailbox interrupts */
/* Interrupt handler for the mailbox TRN ack interrupt; nothing to do —
 * acks are consumed synchronously by xgpu_vi_poll_ack(). */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
498
499static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
500 struct amdgpu_irq_src *src,
501 unsigned type,
502 enum amdgpu_interrupt_state state)
503{
504 u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
505
506 tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
507 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
508 WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
509
510 return 0;
511}
512
/*
 * Deferred FLR (function-level reset) handler, queued from
 * xgpu_vi_mailbox_rcv_irq() when the host announces an FLR. Waits for the
 * host's completion message, then kicks GPU recovery if warranted.
 */
static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG become 3 */
	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	if (amdgpu_device_should_recover_gpu(adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		/* No explicit reset method: host already performed the FLR;
		 * the recovery path re-initializes VF state. */
		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}
536
537static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
538 struct amdgpu_irq_src *src,
539 unsigned type,
540 enum amdgpu_interrupt_state state)
541{
542 u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
543
544 tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
545 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
546 WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
547
548 return 0;
549}
550
/*
 * Interrupt handler for host-to-VF mailbox messages. When in-driver TDR is
 * disabled (amdgpu_gpu_recovery == 0) and the host posted an FLR
 * notification, schedule xgpu_vi_mailbox_flr_work() on the reset domain.
 */
static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (!amdgpu_gpu_recovery) {
		/* see what event we get */
		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now */
		if (!r && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
	}

	return 0;
}
572
/* IRQ source ops for the mailbox ACK interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};

/* IRQ source ops for the mailbox "message valid" (receive) interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
582
/* Hook the mailbox ack/rcv interrupt sources up to their handler tables. */
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}
590
/*
 * Register the mailbox interrupt sources with the legacy IH client.
 * 135/138 are the VI mailbox rcv/ack interrupt source ids — assumed from
 * usage here; confirm against the VI interrupt source list. Undoes the
 * rcv registration if ack registration fails.
 */
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
607
/*
 * Enable both mailbox interrupts and initialize the FLR work item.
 * On partial failure the rcv reference is dropped again, leaving no
 * enabled interrupts behind. Pair with xgpu_vi_mailbox_put_irq().
 */
int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	/* Work item scheduled from xgpu_vi_mailbox_rcv_irq() on FLR notify. */
	INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);

	return 0;
}
625
/* Drop the interrupt references taken by xgpu_vi_mailbox_get_irq(). */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
631
/* Virtualization hooks exported to the amdgpu virt layer for VI VFs. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu = xgpu_vi_request_full_gpu_access,
	.rel_full_gpu = xgpu_vi_release_full_gpu_access,
	.reset_gpu = xgpu_vi_request_reset,
	.wait_reset = xgpu_vi_wait_reset_cmpl,
	.trans_msg = NULL, /* Does not need to trans VF errors to host. */
};
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Xiangliang.Yu@amd.com
23 */
24
25#include "amdgpu.h"
26#include "vi.h"
27#include "bif/bif_5_0_d.h"
28#include "bif/bif_5_0_sh_mask.h"
29#include "vid.h"
30#include "gca/gfx_8_0_d.h"
31#include "gca/gfx_8_0_sh_mask.h"
32#include "gmc_v8_0.h"
33#include "gfx_v8_0.h"
34#include "sdma_v3_0.h"
35#include "tonga_ih.h"
36#include "gmc/gmc_8_2_d.h"
37#include "gmc/gmc_8_2_sh_mask.h"
38#include "oss/oss_3_0_d.h"
39#include "oss/oss_3_0_sh_mask.h"
40#include "gca/gfx_8_0_sh_mask.h"
41#include "dce/dce_10_0_d.h"
42#include "dce/dce_10_0_sh_mask.h"
43#include "smu/smu_7_1_3_d.h"
44#include "mxgpu_vi.h"
45
46/* VI golden setting */
47static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
48 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
49 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
50 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
51 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
52 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
53 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
54 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
55 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
56 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
57 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
58 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
59 mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
60 mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
61 mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
62 mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
63 mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
64 mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
65 mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
66 mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
67 mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
68 mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
69 mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
70 mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
71 mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
72 mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
73 mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
74 mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
75 mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
76 mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
77 mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
78 mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
79 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
80 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
81 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
82 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
83 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
84 mmPCIE_DATA, 0x000f0000, 0x00000000,
85 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
86 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
87 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
88 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
89 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
90 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
91 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
92 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
93 mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
94 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
95};
96
97static const u32 xgpu_fiji_golden_settings_a10[] = {
98 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
99 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
100 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
101 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
102 mmFBC_MISC, 0x1f311fff, 0x12300000,
103 mmHDMI_CONTROL, 0x31000111, 0x00000011,
104 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
105 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
106 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
107 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
108 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
109 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
110 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
111 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
112 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
113 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
114 mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
115 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
116 mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
117 mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
118 mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
119 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
120 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
121 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
122 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
123};
124
125static const u32 xgpu_fiji_golden_common_all[] = {
126 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
127 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
128 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
129 mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
130 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
131 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
132 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
133 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
134 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
135 mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
136};
137
138static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
139 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
140 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
141 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
142 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
143 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
144 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
145 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
146 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
147 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
148 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
149 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
150 mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
151 mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
152 mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
153 mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
154 mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
155 mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
156 mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
157 mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
158 mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
159 mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
160 mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
161 mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
162 mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
163 mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
164 mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
165 mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
166 mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
167 mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
168 mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
169 mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
170 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
171 mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
172 mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
173 mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
174 mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
175 mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
176 mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
177 mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
178 mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
179 mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
180 mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
181 mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
182 mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
183 mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
184 mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
185 mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
186 mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
187 mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
188 mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
189 mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
190 mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
191 mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
192 mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
193 mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
194 mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
195 mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
196 mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
197 mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
198 mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
199 mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
200 mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
201 mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
202 mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
203 mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
204 mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
205 mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
206 mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
207 mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
208 mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
209 mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
210 mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
211 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
212 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
213 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
214 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
215 mmPCIE_DATA, 0x000f0000, 0x00000000,
216 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
217 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
218 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
219 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
220 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
221 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
222 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
223 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
224 mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
225 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
226};
227
/*
 * Tonga VF golden register settings (a11 silicon).
 * Flat table of { register offset, mask, value } triples, programmed via
 * amdgpu_device_program_register_sequence() from
 * xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
267
/*
 * Tonga VF common golden registers (raster config, resource reservation),
 * same { register offset, mask, value } triple layout as the tables above.
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
277
278void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
279{
280 switch (adev->asic_type) {
281 case CHIP_FIJI:
282 amdgpu_device_program_register_sequence(adev,
283 xgpu_fiji_mgcg_cgcg_init,
284 ARRAY_SIZE(
285 xgpu_fiji_mgcg_cgcg_init));
286 amdgpu_device_program_register_sequence(adev,
287 xgpu_fiji_golden_settings_a10,
288 ARRAY_SIZE(
289 xgpu_fiji_golden_settings_a10));
290 amdgpu_device_program_register_sequence(adev,
291 xgpu_fiji_golden_common_all,
292 ARRAY_SIZE(
293 xgpu_fiji_golden_common_all));
294 break;
295 case CHIP_TONGA:
296 amdgpu_device_program_register_sequence(adev,
297 xgpu_tonga_mgcg_cgcg_init,
298 ARRAY_SIZE(
299 xgpu_tonga_mgcg_cgcg_init));
300 amdgpu_device_program_register_sequence(adev,
301 xgpu_tonga_golden_settings_a11,
302 ARRAY_SIZE(
303 xgpu_tonga_golden_settings_a11));
304 amdgpu_device_program_register_sequence(adev,
305 xgpu_tonga_golden_common_all,
306 ARRAY_SIZE(
307 xgpu_tonga_golden_common_all));
308 break;
309 default:
310 BUG_ON("Doesn't support chip type.\n");
311 break;
312 }
313}
314
315/*
316 * Mailbox communication between GPU hypervisor and VFs
317 */
318static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
319{
320 u32 reg;
321 int timeout = VI_MAILBOX_TIMEDOUT;
322 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
323
324 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
325 reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
326 WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
327
328 /*Wait for RCV_MSG_VALID to be 0*/
329 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
330 while (reg & mask) {
331 if (timeout <= 0) {
332 pr_err("RCV_MSG_VALID is not cleared\n");
333 break;
334 }
335 mdelay(1);
336 timeout -=1;
337
338 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
339 }
340}
341
342static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
343{
344 u32 reg;
345
346 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
347 reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
348 TRN_MSG_VALID, val ? 1 : 0);
349 WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
350}
351
352static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
353 enum idh_request req)
354{
355 u32 reg;
356
357 reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
358 reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
359 MSGBUF_DATA, req);
360 WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
361
362 xgpu_vi_mailbox_set_valid(adev, true);
363}
364
/*
 * xgpu_vi_mailbox_rcv_msg - check whether @event is pending in the
 * receive mailbox.
 *
 * Returns 0 (and acks the message back to the PF) when the receive
 * buffer currently holds @event, -ENOENT otherwise.
 */
static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);

	/* workaround: host driver doesn't set VALID for CMPL now,
	 * so skip the RCV_MSG_VALID gate for that event only */
	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	/* send ack to PF */
	xgpu_vi_mailbox_send_ack(adev);

	return 0;
}
387
388static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
389{
390 int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
391 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
392 u32 reg;
393
394 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
395 while (!(reg & mask)) {
396 if (timeout <= 0) {
397 pr_err("Doesn't get ack from pf.\n");
398 r = -ETIME;
399 break;
400 }
401 mdelay(5);
402 timeout -= 5;
403
404 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
405 }
406
407 return r;
408}
409
410static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
411{
412 int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
413
414 r = xgpu_vi_mailbox_rcv_msg(adev, event);
415 while (r) {
416 if (timeout <= 0) {
417 pr_err("Doesn't get ack from pf.\n");
418 r = -ETIME;
419 break;
420 }
421 mdelay(5);
422 timeout -= 5;
423
424 r = xgpu_vi_mailbox_rcv_msg(adev, event);
425 }
426
427 return r;
428}
429
430static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
431 enum idh_request request)
432{
433 int r;
434
435 xgpu_vi_mailbox_trans_msg(adev, request);
436
437 /* start to poll ack */
438 r = xgpu_vi_poll_ack(adev);
439 if (r)
440 return r;
441
442 xgpu_vi_mailbox_set_valid(adev, false);
443
444 /* start to check msg if request is idh_req_gpu_init_access */
445 if (request == IDH_REQ_GPU_INIT_ACCESS ||
446 request == IDH_REQ_GPU_FINI_ACCESS ||
447 request == IDH_REQ_GPU_RESET_ACCESS) {
448 r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
449 if (r) {
450 pr_err("Doesn't get ack from pf, give up\n");
451 return r;
452 }
453 }
454
455 return 0;
456}
457
/* Ask the hypervisor to reset the GPU on behalf of this VF. */
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
462
/* Block until the host signals FLR completion (or poll timeout). */
static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
{
	return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
}
467
468static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
469 bool init)
470{
471 enum idh_request req;
472
473 req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
474 return xgpu_vi_send_access_requests(adev, req);
475}
476
477static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
478 bool init)
479{
480 enum idh_request req;
481 int r = 0;
482
483 req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
484 r = xgpu_vi_send_access_requests(adev, req);
485
486 return r;
487}
488
/* mailbox interrupt support */
/*
 * Handler for the mailbox TRN_MSG_ACK interrupt.  Acks are consumed by
 * polling (xgpu_vi_poll_ack), so there is nothing to do here.
 */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
497
498static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
499 struct amdgpu_irq_src *src,
500 unsigned type,
501 enum amdgpu_interrupt_state state)
502{
503 u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
504
505 tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
506 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
507 WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
508
509 return 0;
510}
511
512static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
513{
514 struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
515 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
516
517 /* wait until RCV_MSG become 3 */
518 if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
519 pr_err("failed to recieve FLR_CMPL\n");
520 return;
521 }
522
523 /* Trigger recovery due to world switch failure */
524 amdgpu_device_gpu_recover(adev, NULL, false);
525}
526
527static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
528 struct amdgpu_irq_src *src,
529 unsigned type,
530 enum amdgpu_interrupt_state state)
531{
532 u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
533
534 tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
535 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
536 WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
537
538 return 0;
539}
540
/*
 * Handler for the mailbox RCV_MSG_VALID interrupt: the host posted a
 * message for this VF.  Only IDH_FLR_NOTIFICATION is handled, by
 * deferring the heavy lifting to xgpu_vi_mailbox_flr_work().
 */
static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (!amdgpu_gpu_recovery) {
		/* see what event we get */
		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}
559
/* IRQ source ops for the mailbox ack interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
564
/* IRQ source ops for the mailbox receive-valid interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
569
/* Hook up the mailbox ack/rcv IRQ sources on the virt state. */
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}
577
578int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
579{
580 int r;
581
582 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
583 if (r)
584 return r;
585
586 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
587 if (r) {
588 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
589 return r;
590 }
591
592 return 0;
593}
594
595int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
596{
597 int r;
598
599 r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
600 if (r)
601 return r;
602 r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
603 if (r) {
604 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
605 return r;
606 }
607
608 INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
609
610 return 0;
611}
612
/* Disable both mailbox interrupts (counterpart of xgpu_vi_mailbox_get_irq). */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
618
/* Virtualization ops exported for VI-family VFs. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu = xgpu_vi_request_full_gpu_access,
	.rel_full_gpu = xgpu_vi_release_full_gpu_access,
	.reset_gpu = xgpu_vi_request_reset,
	.wait_reset = xgpu_vi_wait_reset_cmpl,
	.trans_msg = NULL, /* Does not need to trans VF errors to host. */
};