/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ring.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#include "clearstate_vi.h"

#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "smu/smu_7_1_3_d.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define GFX8_NUM_GFX_RINGS	1
#define GFX8_MEC_HPD_SIZE	4096

#define TOPAZ_GB_ADDR_CONFIG_GOLDEN	0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN	0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN	0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN	0x22011003

#define ARRAY_MODE(x)		((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)		((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)		((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x)	((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x)		((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)		((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)		((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)	((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)		((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK	0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK	0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK	0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK	0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK	0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK	0x00000020L

/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD	1
#define CLE_BPM_SERDES_CMD	0

/* BPM Register Address */
enum {
	BPM_REG_CGLS_EN = 0,	/* Enable/Disable CGLS */
	BPM_REG_CGLS_ON,	/* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE,	/* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE,	/* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE,	/* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX
};

#define RLC_FormatDirectRegListLength	14

MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");

MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");

MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};

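/*
 * Per-ASIC "golden" register tables, applied below in
 * gfx_v8_0_init_golden_registers() via
 * amdgpu_device_program_register_sequence().  Each table is a flat list of
 * {register offset, AND mask, OR value} triples: a mask of 0xffffffff means
 * the value is written directly, anything else is applied read-modify-write.
 */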
static const u32 golden_settings_tonga_a11[] =
{
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 tonga_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_vegam_a11[] =
{
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 vegam_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris11_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x07180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris10_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 fiji_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_iceland_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};

static const u32 iceland_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};

static const u32 cz_golden_settings_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};

static const u32 cz_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};

static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};

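/*
 * Human-readable decode of the SQ_EDC_INFO SOURCE field, used when logging
 * SQ ECC/EDC interrupt information later in this file.
 */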
static const char * const sq_edc_source_names[] = {
	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);

#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK		0x0000007fL
#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT	0x00000000L

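/*
 * Apply the golden register sequences for the current ASIC.  On Polaris10
 * this additionally programs the ACLK divider through the SMC and, for a
 * handful of specific board SKUs (matched by PCI subsystem IDs), issues
 * ATOM i2c transactions carrying what appear to be board-specific regulator
 * settings.
 */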
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
							iceland_golden_common_all,
							ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
							fiji_golden_common_all,
							ARRAY_SIZE(fiji_golden_common_all));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
							tonga_golden_common_all,
							ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_vegam_a11,
							ARRAY_SIZE(golden_settings_vegam_a11));
		amdgpu_device_program_register_sequence(adev,
							vegam_golden_common_all,
							ARRAY_SIZE(vegam_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris11_golden_common_all,
							ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris10_golden_common_all,
							ARRAY_SIZE(polaris10_golden_common_all));
		data = RREG32_SMC(ixCG_ACLK_CNTL);
		data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK;
		data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT;
		WREG32_SMC(ixCG_ACLK_CNTL, data);
		if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_common_all,
							ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_common_all,
							ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}

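/*
 * Ring sanity test: push a SET_UCONFIG_REG packet that writes 0xDEADBEEF to
 * SCRATCH_REG0 and poll the register until the token shows up or
 * adev->usec_timeout microseconds have elapsed.
 */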
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSCRATCH_REG0);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

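/*
 * IB sanity test: build a tiny indirect buffer whose WRITE_DATA packet
 * (DST_SEL(5) = memory) stores 0xDEADBEEF into a writeback slot, submit it,
 * wait for its fence and check that the token landed.
 */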
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ))
		amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

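/*
 * Request and parse the CP (PFP/ME/CE/MEC/MEC2) and RLC microcode.  On
 * Polaris parts the "_2" firmware images are tried first, falling back to
 * the original names if they are absent; MEC2 firmware is optional and not
 * used on Stoney or Topaz.  The RLC header also carries the register
 * save/restore lists, which are unpacked here.
 */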
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_VEGAM:
		chip_name = "vegam";
		break;
	default:
		BUG();
	}

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	/*
	 * Support for MCBP/Virtualization in combination with chained IBs is
	 * formally released with feature version 46.
	 */
	if (adev->gfx.ce_feature_version >= 46 &&
	    adev->gfx.pfp_feature_version >= 46) {
		adev->virt.chained_ib_support = true;
		DRM_INFO("Chained IB support enabled!\n");
	} else {
		adev->virt.chained_ib_support = false;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);

	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);

	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ)) {
		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
			if (err == -ENODEV) {
				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
				err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
			}
		} else {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
		}
		if (!err) {
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
			adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
			adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
		} else {
			err = 0;
			adev->gfx.mec2_fw = NULL;
		}
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
	info->fw = adev->gfx.pfp_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
	info->fw = adev->gfx.me_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
	info->fw = adev->gfx.ce_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
	info->fw = adev->gfx.rlc_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
	info->fw = adev->gfx.mec_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	/* we need to account for the JT (jump table) as well */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);

	if (amdgpu_sriov_vf(adev)) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
	}

	if (adev->gfx.mec2_fw) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		info->fw = adev->gfx.mec2_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx8: Failed to load firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.ce_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
		amdgpu_ucode_release(&adev->gfx.mec2_fw);
	}
	return err;
}

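/*
 * Emit the clear-state buffer (CSB) as a PM4 stream: the SECT_CONTEXT
 * register extents from the VI clear-state tables are wrapped in
 * PREAMBLE_BEGIN/END_CLEAR_STATE, followed by the per-ASIC raster
 * configuration and a final CLEAR_STATE packet.
 */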
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_CARRIZO)
		return 5;
	else
		return 4;
}

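/*
 * RLC software init: set up the VI clear-state data and its backing BO;
 * Carrizo and Stoney additionally allocate the CP jump table + GDS backup
 * buffer (cp_table), and the SPM VMID is seeded with 0xf.
 */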
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = vi_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}

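/*
 * MEC init: take ownership of the compute queues this driver will use and
 * allocate one GFX8_MEC_HPD_SIZE HPD EOP buffer per enabled compute ring,
 * cleared once at creation.
 */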
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);

	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

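/*
 * Raw GCN ISA for the GPR-initialization shaders used by the EDC workaround
 * below.  The VGPR variant is a long run of v_mov_b32 writes covering the
 * VGPR file; the SGPR variant is the equivalent s_mov_b32 sequence.  Both
 * end with s_barrier (0xbf8a0000) and s_endpgm (0xbf810000).
 */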
1334static const u32 vgpr_init_compute_shader[] =
1335{
1336 0x7e000209, 0x7e020208,
1337 0x7e040207, 0x7e060206,
1338 0x7e080205, 0x7e0a0204,
1339 0x7e0c0203, 0x7e0e0202,
1340 0x7e100201, 0x7e120200,
1341 0x7e140209, 0x7e160208,
1342 0x7e180207, 0x7e1a0206,
1343 0x7e1c0205, 0x7e1e0204,
1344 0x7e200203, 0x7e220202,
1345 0x7e240201, 0x7e260200,
1346 0x7e280209, 0x7e2a0208,
1347 0x7e2c0207, 0x7e2e0206,
	0x7e300205, 0x7e320204,
	0x7e340203, 0x7e360202,
	0x7e380201, 0x7e3a0200,
	0x7e3c0209, 0x7e3e0208,
	0x7e400207, 0x7e420206,
	0x7e440205, 0x7e460204,
	0x7e480203, 0x7e4a0202,
	0x7e4c0201, 0x7e4e0200,
	0x7e500209, 0x7e520208,
	0x7e540207, 0x7e560206,
	0x7e580205, 0x7e5a0204,
	0x7e5c0203, 0x7e5e0202,
	0x7e600201, 0x7e620200,
	0x7e640209, 0x7e660208,
	0x7e680207, 0x7e6a0206,
	0x7e6c0205, 0x7e6e0204,
	0x7e700203, 0x7e720202,
	0x7e740201, 0x7e760200,
	0x7e780209, 0x7e7a0208,
	0x7e7c0207, 0x7e7e0206,
	0xbf8a0000, 0xbf810000,
};

static const u32 sgpr_init_compute_shader[] =
{
	0xbe8a0100, 0xbe8c0102,
	0xbe8e0104, 0xbe900106,
	0xbe920108, 0xbe940100,
	0xbe960102, 0xbe980104,
	0xbe9a0106, 0xbe9c0108,
	0xbe9e0100, 0xbea00102,
	0xbea20104, 0xbea40106,
	0xbea60108, 0xbea80100,
	0xbeaa0102, 0xbeac0104,
	0xbeae0106, 0xbeb00108,
	0xbeb20100, 0xbeb40102,
	0xbeb60104, 0xbeb80106,
	0xbeba0108, 0xbebc0100,
	0xbebe0102, 0xbec00104,
	0xbec20106, 0xbec40108,
	0xbec60100, 0xbec80102,
	0xbee60004, 0xbee70005,
	0xbeea0006, 0xbeeb0007,
	0xbee80008, 0xbee90009,
	0xbefc0000, 0xbf8a0000,
	0xbf810000, 0x00000000,
};

static const u32 vgpr_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*4,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr1_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr2_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sec_ded_counter_registers[] =
{
	mmCPC_EDC_ATC_CNT,
	mmCPC_EDC_SCRATCH_CNT,
	mmCPC_EDC_UCODE_CNT,
	mmCPF_EDC_ATC_CNT,
	mmCPF_EDC_ROQ_CNT,
	mmCPF_EDC_TAG_CNT,
	mmCPG_EDC_ATC_CNT,
	mmCPG_EDC_DMA_CNT,
	mmCPG_EDC_TAG_CNT,
	mmDC_EDC_CSINVOC_CNT,
	mmDC_EDC_RESTORE_CNT,
	mmDC_EDC_STATE_CNT,
	mmGDS_EDC_CNT,
	mmGDS_EDC_GRBM_CNT,
	mmGDS_EDC_OA_DED,
	mmSPI_EDC_CNT,
	mmSQC_ATC_EDC_GATCL1_CNT,
	mmSQC_EDC_CNT,
	mmSQ_EDC_DED_CNT,
	mmSQ_EDC_INFO,
	mmSQ_EDC_SEC_CNT,
	mmTCC_EDC_CNT,
	mmTCP_ATC_EDC_GATCL1_CNT,
	mmTCP_EDC_CNT,
	mmTD_EDC_CNT
};

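/*
 * gfx_v8_0_do_edc_gpr_workarounds - initialize all GPRs for EDC (CZ only)
 *
 * Dispatches small compute shaders that write every VGPR and SGPR so the
 * EDC (error detection and correction) logic sees fully initialized
 * register files, then re-enables EDC and reads back the SEC/DED counter
 * registers to clear them.
 */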
static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r, i;
	u32 tmp;
	unsigned total_size, vgpr_offset, sgpr_offset;
	u64 gpu_addr;

	/* only supported on CZ */
	if (adev->asic_type != CHIP_CARRIZO)
		return 0;

	/* bail if the compute ring is not ready */
	if (!ring->sched.ready)
		return 0;

	tmp = RREG32(mmGB_EDC_MODE);
	WREG32(mmGB_EDC_MODE, 0);

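	/* size the IB: each init register pair costs 3 dwords (SET_SH_REG
	 * header, offset, value); each dispatch then adds a 4-dword
	 * COMPUTE_PGM_LO/HI write, a 5-dword DISPATCH_DIRECT and a 2-dword
	 * CS partial flush; the final *4 converts dwords to bytes.
	 */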
	total_size =
		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size = ALIGN(total_size, 256);
	vgpr_offset = total_size;
	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
	sgpr_offset = total_size;
	total_size += sizeof(sgpr_init_compute_shader);

	/* allocate an indirect buffer to put the commands in */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, total_size,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	/* load the compute shaders */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];

	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

	/* init the ib length to 0 */
	ib.length_dw = 0;

	/* VGPR */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR1 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR2 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* schedule the ib on the ring */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r) {
		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
		goto fail;
	}

	/* wait for the GPU to finish processing the IB */
	r = dma_fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto fail;
	}

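	/* with every GPR now initialized, re-enable EDC reporting */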
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
	WREG32(mmGB_EDC_MODE, tmp);

	tmp = RREG32(mmCC_GC_EDC_CONFIG);
	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
	WREG32(mmCC_GC_EDC_CONFIG, tmp);

	/* read back registers to clear the counters */
	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
		RREG32(sec_ded_counter_registers[i]);

fail:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);

	return r;
}

static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;
	int ret;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_STONEY:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_cu_per_sh = 3;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM is installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If the DIMM address map is 8GB, the row size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), use the larger of the two. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
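		/* NOOFCOLS encodes 2^(8 + n) columns; at 4 bytes per column
		 * that gives the row size in bytes, converted to KB below
		 * and capped at 4KB.
		 */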
		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}

	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
		break;
	case 2:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
		break;
	case 4:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;

	return 0;
}

static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
	unsigned int hw_prio;

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX8_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

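	/* each MEC pipe has its own EOP interrupt source; map this ring's
	 * (me, pipe) pair onto the corresponding linear IRQ id.
	 */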
	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);

static int gfx_v8_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	int xcc_id = 0;
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_CARRIZO:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		adev->gfx.mec.num_mec = 2;
		break;
	case CHIP_TOPAZ:
	case CHIP_STONEY:
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

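	/* every VI MEC exposes 4 pipes with 8 queues each */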
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* Add CP EDC/ECC irq */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
			      &adev->gfx.cp_ecc_error_irq);
	if (r)
		return r;

	/* SQ interrupts. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
			      &adev->gfx.sq_irq);
	if (r) {
		DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
		return r;
	}

	INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = gfx_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v8_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		/* no gfx doorbells on iceland */
		if (adev->asic_type != CHIP_TOPAZ) {
			ring->use_doorbell = true;
			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
		}

		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v8_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

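	/* the KIQ (kernel interface queue) is used by the driver itself to
	 * map and unmap the compute queues on the MEC.
	 */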
	r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	r = gfx_v8_0_gpu_early_init(adev);
	if (r)
		return r;

	return 0;
}

static int gfx_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
	amdgpu_gfx_kiq_fini(adev, 0);

	gfx_v8_0_mec_fini(adev);
	amdgpu_gfx_rlc_fini(adev);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
				&adev->gfx.rlc.clear_state_gpu_addr,
				(void **)&adev->gfx.rlc.cs_ptr);
	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
				&adev->gfx.rlc.cp_table_gpu_addr,
				(void **)&adev->gfx.rlc.cp_table_ptr);
	}
	gfx_v8_0_free_microcode(adev);

	return 0;
}

static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	uint32_t *modearray, *mod2array;
	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
	u32 reg_offset;

	modearray = adev->gfx.config.tile_mode_array;
	mod2array = adev->gfx.config.macrotile_mode_array;

	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
		modearray[reg_offset] = 0;

	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
		mod2array[reg_offset] = 0;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

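		/* tile mode slots 7, 12, 17 and 23 are not programmed on this
		 * ASIC and are left at their hardware default values.
		 */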
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
			    reg_offset != 23)
				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	case CHIP_FIJI:
	case CHIP_VEGAM:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	case CHIP_TONGA:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
2843 case CHIP_POLARIS10:
2844 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2845 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2846 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2847 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2848 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2849 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2850 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2851 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2852 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2853 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2854 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2855 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2856 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2857 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2858 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2859 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2860 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2861 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2862 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2863 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2864 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2865 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2866 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2867 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2868 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2869 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2870 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2871 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2872 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2873 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2874 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2875 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2876 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2877 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2878 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2879 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2880 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2881 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2882 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2883 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2884 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2885 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2886 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2887 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2888 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2889 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2890 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2891 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2892 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2893 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2894 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2895 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2896 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2897 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2898 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2899 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2900 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2901 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2902 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2903 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2904 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2905 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2906 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2907 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2908 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2909 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2910 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2911 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2912 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2913 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2914 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2915 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2916 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2917 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2918 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2919 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2920 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2921 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2922 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2923 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2924 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2925 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2926 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2927 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2928 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2929 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2930 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2931 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2932 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2933 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2934 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2935 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2936 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2937 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2938 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2939 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2940 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2941 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2942 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2943 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2944 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2945 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2946 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2947 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2948 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2949 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2950 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2951 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2952 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2953 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2954 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2955 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2956 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2957 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2958 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2959 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2960 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2961 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2962 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2963 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2964 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2965 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2966
2967 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2968 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2969 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2970 NUM_BANKS(ADDR_SURF_16_BANK));
2971
2972 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2973 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2974 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2975 NUM_BANKS(ADDR_SURF_16_BANK));
2976
2977 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2978 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2979 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2980 NUM_BANKS(ADDR_SURF_16_BANK));
2981
2982 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2983 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2984 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2985 NUM_BANKS(ADDR_SURF_16_BANK));
2986
2987 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2988 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2989 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2990 NUM_BANKS(ADDR_SURF_16_BANK));
2991
2992 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2993 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2994 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2995 NUM_BANKS(ADDR_SURF_16_BANK));
2996
2997 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2998 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2999 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3000 NUM_BANKS(ADDR_SURF_16_BANK));
3001
3002 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3003 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3004 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3005 NUM_BANKS(ADDR_SURF_16_BANK));
3006
3007 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3008 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3009 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3010 NUM_BANKS(ADDR_SURF_16_BANK));
3011
3012 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3013 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3014 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3015 NUM_BANKS(ADDR_SURF_16_BANK));
3016
3017 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3018 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3019 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3020 NUM_BANKS(ADDR_SURF_16_BANK));
3021
3022 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3023 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3024 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3025 NUM_BANKS(ADDR_SURF_8_BANK));
3026
3027 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3028 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3029 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3030 NUM_BANKS(ADDR_SURF_4_BANK));
3031
3032 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3033 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3034 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3035 NUM_BANKS(ADDR_SURF_4_BANK));
3036
3037 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3038 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3039
3040 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3041 if (reg_offset != 7)
3042 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3043
3044 break;
3045 case CHIP_STONEY:
3046 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3047 PIPE_CONFIG(ADDR_SURF_P2) |
3048 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3049 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3050 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3051 PIPE_CONFIG(ADDR_SURF_P2) |
3052 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3053 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3054 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3055 PIPE_CONFIG(ADDR_SURF_P2) |
3056 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3057 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3058 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3059 PIPE_CONFIG(ADDR_SURF_P2) |
3060 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3061 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3062 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3063 PIPE_CONFIG(ADDR_SURF_P2) |
3064 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3065 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3066 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3067 PIPE_CONFIG(ADDR_SURF_P2) |
3068 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3069 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3070 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3071 PIPE_CONFIG(ADDR_SURF_P2) |
3072 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3073 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3074 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3075 PIPE_CONFIG(ADDR_SURF_P2));
3076 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3077 PIPE_CONFIG(ADDR_SURF_P2) |
3078 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3079 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3080 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3081 PIPE_CONFIG(ADDR_SURF_P2) |
3082 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3083 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3084 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3085 PIPE_CONFIG(ADDR_SURF_P2) |
3086 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3087 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3088 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3089 PIPE_CONFIG(ADDR_SURF_P2) |
3090 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3091 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3092 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3093 PIPE_CONFIG(ADDR_SURF_P2) |
3094 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3095 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3096 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3097 PIPE_CONFIG(ADDR_SURF_P2) |
3098 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3099 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3100 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3101 PIPE_CONFIG(ADDR_SURF_P2) |
3102 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3103 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3104 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3105 PIPE_CONFIG(ADDR_SURF_P2) |
3106 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3107 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3108 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3109 PIPE_CONFIG(ADDR_SURF_P2) |
3110 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3111 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3112 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3113 PIPE_CONFIG(ADDR_SURF_P2) |
3114 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3115 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3116 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3117 PIPE_CONFIG(ADDR_SURF_P2) |
3118 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3119 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3120 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3121 PIPE_CONFIG(ADDR_SURF_P2) |
3122 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3123 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3124 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3125 PIPE_CONFIG(ADDR_SURF_P2) |
3126 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3127 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3128 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3129 PIPE_CONFIG(ADDR_SURF_P2) |
3130 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3131 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3132 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3133 PIPE_CONFIG(ADDR_SURF_P2) |
3134 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3135 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3136 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3137 PIPE_CONFIG(ADDR_SURF_P2) |
3138 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3139 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3140 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3141 PIPE_CONFIG(ADDR_SURF_P2) |
3142 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3143 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3144 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3145 PIPE_CONFIG(ADDR_SURF_P2) |
3146 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3147 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3148
3149 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3150 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3151 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3152 NUM_BANKS(ADDR_SURF_8_BANK));
3153 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3154 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3155 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3156 NUM_BANKS(ADDR_SURF_8_BANK));
3157 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3158 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3159 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3160 NUM_BANKS(ADDR_SURF_8_BANK));
3161 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3162 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3163 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3164 NUM_BANKS(ADDR_SURF_8_BANK));
3165 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3166 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3167 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3168 NUM_BANKS(ADDR_SURF_8_BANK));
3169 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3170 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3171 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3172 NUM_BANKS(ADDR_SURF_8_BANK));
3173 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3174 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3175 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3176 NUM_BANKS(ADDR_SURF_8_BANK));
3177 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3178 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3179 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3180 NUM_BANKS(ADDR_SURF_16_BANK));
3181 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3182 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3183 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3184 NUM_BANKS(ADDR_SURF_16_BANK));
3185 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3186 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3187 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3188 NUM_BANKS(ADDR_SURF_16_BANK));
3189 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3190 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3191 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3192 NUM_BANKS(ADDR_SURF_16_BANK));
3193 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3194 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3195 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3196 NUM_BANKS(ADDR_SURF_16_BANK));
3197 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3198 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3199 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3200 NUM_BANKS(ADDR_SURF_16_BANK));
3201 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3202 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3203 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3204 NUM_BANKS(ADDR_SURF_8_BANK));
3205
3206 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3207 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3208 reg_offset != 23)
3209 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3210
3211 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3212 if (reg_offset != 7)
3213 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3214
3215 break;
	default:
		dev_warn(adev->dev,
			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init(), falling through to CHIP_CARRIZO\n",
			 adev->asic_type);
		fallthrough;

	case CHIP_CARRIZO:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
			    reg_offset != 23)
				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	}
}

static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance,
				  int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32(mmGRBM_GFX_INDEX, data);
}
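
/*
 * Usage note (inferred from the field programming above): passing
 * 0xffffffff for se_num, sh_num or instance selects broadcast writes to
 * every SE/SH/instance, while any other value indexes a single unit, e.g.
 *
 *	gfx_v8_0_select_se_sh(adev, 1, 0, 0xffffffff, 0);
 *
 * targets SE1/SH0 while still broadcasting to all CU instances.  Callers
 * in this file hold adev->grbm_idx_mutex around the selection and the
 * dependent register accesses.
 */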

static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	vi_srbm_select(adev, me, pipe, q, vm);
}

static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
		RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
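
/*
 * Worked example (illustrative numbers, not tied to any specific SKU):
 * with max_backends_per_se = 4 and max_sh_per_se = 1 the mask covers four
 * RBs (0xf); if the combined BACKEND_DISABLE field reads back 0x2, the
 * function returns (~0x2) & 0xf = 0xd, i.e. RBs 0, 2 and 3 are active.
 */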

static void
gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_VEGAM:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) |
			  PKR_XSEL(1) | PKR_YSEL(1) |
			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_TOPAZ:
	case CHIP_CARRIZO:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_STONEY:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}

static void
gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
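
	/*
	 * Example (assuming num_rb = 8 spread over num_se = 4, so
	 * rb_per_se = 2): se_mask[0] = 0x03 & rb_mask, se_mask[1] = 0x0c &
	 * rb_mask, and so on -- each entry is the slice of the global RB
	 * mask owned by one shader engine.
	 */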

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on VI */
		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on VI */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}

static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			data = gfx_v8_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);

	if (!adev->gfx.config.backend_enable_mask ||
	    adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
/**
 * gfx_v8_0_init_compute_vmid - initialize the compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the SH_MEM registers for the compute VMIDs.
 *
 */
static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:     0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:   0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
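
	/*
	 * Encoding note: SH_MEM_BASES carries one 16-bit base in each half
	 * (private in the low half, shared in the high half), each
	 * supplying bits [63:48] of the aperture address, so 0x6000 here
	 * yields the 0x60000000'00000000 apertures described above.
	 */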

	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			SH_MEM_CONFIG__PRIVATE_ATC_MASK;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
	}
}

static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
	}
}

static void gfx_v8_0_config_init(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	default:
		adev->gfx.config.double_offchip_lds_buf = 1;
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->gfx.config.double_offchip_lds_buf = 0;
		break;
	}
}

static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp, sh_static_mem_cfg;
	int i;

	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

	gfx_v8_0_tiling_mode_table_init(adev);
	gfx_v8_0_setup_rb(adev);
	gfx_v8_0_get_cu_info(adev);
	gfx_v8_0_config_init(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
					  SWIZZLE_ENABLE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
					  ELEMENT_SIZE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
					  INDEX_STRIDE, 3);
	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			WREG32(mmSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			tmp = adev->gmc.shared_aperture_start >> 48;
			WREG32(mmSH_MEM_BASES, tmp);
		}

		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gfx_v8_0_init_compute_vmid(adev);
	gfx_v8_0_init_gds_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * Make sure that the following register writes are broadcast to
	 * all of the shaders.
	 */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	WREG32(mmPA_SC_FIFO_SIZE,
	       (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
	       (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
	       (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
	       (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));

	tmp = RREG32(mmSPI_ARB_PRIORITY);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
	WREG32(mmSPI_ARB_PRIORITY, tmp);

	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v8_0_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff, 0);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32(mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
	/* csib */
	WREG32(mmRLC_CSIB_ADDR_HI,
	       adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(mmRLC_CSIB_ADDR_LO,
	       adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(mmRLC_CSIB_LENGTH,
	       adev->gfx.rlc.clear_state_size);
}

static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
					int ind_offset,
					int list_size,
					int *unique_indices,
					int *indices_count,
					int max_indices,
					int *ind_start_offsets,
					int *offset_count,
					int max_offset)
{
	int indices;
	bool new_entry = true;

	for (; ind_offset < list_size; ind_offset++) {

		if (new_entry) {
			new_entry = false;
			ind_start_offsets[*offset_count] = ind_offset;
			*offset_count = *offset_count + 1;
			BUG_ON(*offset_count >= max_offset);
		}

		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		ind_offset += 2;

		/* look for a matching index */
		for (indices = 0;
			indices < *indices_count;
			indices++) {
			if (unique_indices[indices] ==
				register_list_format[ind_offset])
				break;
		}

		if (indices >= *indices_count) {
			unique_indices[*indices_count] =
				register_list_format[ind_offset];
			indices = *indices_count;
			*indices_count = *indices_count + 1;
			BUG_ON(*indices_count >= max_indices);
		}

		register_list_format[ind_offset] = indices;
	}
}
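
/*
 * Format note (inferred from the parser above, not a spec quote): the
 * indirect list is a stream of entries separated by 0xFFFFFFFF markers;
 * the word two positions past each non-marker offset holds an index
 * register value, which the parser replaces with a small per-list index
 * so the RLC can resolve it through the SRM index registers programmed
 * in gfx_v8_0_init_save_restore_list().
 */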

static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
{
	int i, temp, data;
	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
	int indices_count = 0;
	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	int offset_count = 0;

	int list_size;
	unsigned int *register_list_format =
		kmemdup(adev->gfx.rlc.register_list_format,
			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;

	gfx_v8_0_parse_ind_reg_list(register_list_format,
				    RLC_FormatDirectRegListLength,
				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				    unique_indices,
				    &indices_count,
				    ARRAY_SIZE(unique_indices),
				    indirect_start_offsets,
				    &offset_count,
				    ARRAY_SIZE(indirect_start_offsets));

	/* save and restore list */
	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);

	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);

	/* indirect list */
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);

	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);

3970 WREG32(mmRLC_GPM_SCRATCH_ADDR,
3971 adev->gfx.rlc.starting_offsets_start);
3972 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
3973 WREG32(mmRLC_GPM_SCRATCH_DATA,
3974 indirect_start_offsets[i]);
3975
3976 /* unique indices */
3977 temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
3978 data = mmRLC_SRM_INDEX_CNTL_DATA_0;
3979 for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
3980 if (unique_indices[i] != 0) {
3981 WREG32(temp + i, unique_indices[i] & 0x3FFFF);
3982 WREG32(data + i, unique_indices[i] >> 20);
3983 }
3984 }
3985 kfree(register_list_format);
3986
3987 return 0;
3988}

static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);

	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
	WREG32(mmRLC_PG_DELAY, data);

	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
}

static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
}

static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
						  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
}

static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}

static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		gfx_v8_0_init_csb(adev);
		gfx_v8_0_init_save_restore_list(adev);
		gfx_v8_0_enable_save_restore_machine(adev);
		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
		gfx_v8_0_init_power_gating(adev);
		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
	} else if ((adev->asic_type == CHIP_POLARIS11) ||
		   (adev->asic_type == CHIP_POLARIS12) ||
		   (adev->asic_type == CHIP_VEGAM)) {
		gfx_v8_0_init_csb(adev);
		gfx_v8_0_init_save_restore_list(adev);
		gfx_v8_0_enable_save_restore_machine(adev);
		gfx_v8_0_init_power_gating(adev);
	}
}

static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);

	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
	gfx_v8_0_wait_for_rlc_serdes(adev);
}

static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);

	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);

	/* Carrizo and other APUs enable the CP interrupt after the CP is initialized */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		gfx_v8_0_init_csb(adev);
		return 0;
	}

	adev->gfx.rlc.funcs->stop(adev);
	adev->gfx.rlc.funcs->reset(adev);
	gfx_v8_0_init_pg(adev);
	adev->gfx.rlc.funcs->start(adev);

	return 0;
}

static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32(mmCP_ME_CNTL);

	if (enable) {
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	}
	WREG32(mmCP_ME_CNTL, tmp);
	udelay(50);
}

static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;
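
	/*
	 * Arithmetic check: the fixed packets above account for
	 * 2 + 3 + 4 + 2 + 2 = 13 dwords, so the CSB size works out to
	 * 13 + the sum over all SECT_CONTEXT extents of (2 + reg_count).
	 */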

	return count;
}

static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v8_0_cp_gfx_enable(adev, true);

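	/*
	 * The extra 4 dwords on top of the CSB size cover the
	 * PACKET3_SET_BASE CE-partition setup (1 header + 3 payload
	 * dwords) emitted at the end of this function.
	 */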
4154 if (r) {
4155 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
4156 return r;
4157 }
4158
4159 /* clear state buffer */
4160 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4161 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4162
4163 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4164 amdgpu_ring_write(ring, 0x80000000);
4165 amdgpu_ring_write(ring, 0x80000000);
4166
4167 for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4168 for (ext = sect->section; ext->extent != NULL; ++ext) {
4169 if (sect->id == SECT_CONTEXT) {
4170 amdgpu_ring_write(ring,
4171 PACKET3(PACKET3_SET_CONTEXT_REG,
4172 ext->reg_count));
4173 amdgpu_ring_write(ring,
4174 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4175 for (i = 0; i < ext->reg_count; i++)
4176 amdgpu_ring_write(ring, ext->extent[i]);
4177 }
4178 }
4179 }
4180
4181 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4182 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4183 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4184 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
4185
4186 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4187 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4188
4189 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4190 amdgpu_ring_write(ring, 0);
4191
4192 /* init the CE partitions */
4193 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4194 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4195 amdgpu_ring_write(ring, 0x8000);
4196 amdgpu_ring_write(ring, 0x8000);
4197
4198 amdgpu_ring_commit(ring);
4199
4200 return 0;
4201}
static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	u32 tmp;

	/* no gfx doorbells on iceland */
	if (adev->asic_type == CHIP_TOPAZ)
		return;

	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}

	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);

	if (adev->flags & AMD_IS_APU)
		return;

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER,
			    adev->doorbell_index.gfx_ring0);
	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
	       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
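	/*
	 * Illustrative arithmetic (assuming a 64 KiB ring): ring_size / 8 =
	 * 8192, so rb_bufsz = order_base_2(8192) = 13 is the value
	 * programmed into CP_RB0_CNTL.RB_BUFSZ below.
	 */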
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	gfx_v8_0_set_cpg_door_bell(adev, ring);
	/* start the ring */
	amdgpu_ring_clear_ring(ring);
	gfx_v8_0_cp_gfx_start(adev);

	return 0;
}

static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable) {
		WREG32(mmCP_MEC_CNTL, 0);
	} else {
		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[0].ring.sched.ready = false;
	}
	udelay(50);
}

/* KIQ functions */
static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32(mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32(mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32(mmRLC_CP_SCHEDULERS, tmp);
}
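
/*
 * Encoding note (based on the shifts above, not a register-spec quote):
 * the low byte of RLC_CP_SCHEDULERS identifies the KIQ as me/pipe/queue
 * packed as (me << 5) | (pipe << 3) | queue, and the second write with
 * bit 0x80 set appears to latch/activate that selection.
 */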

static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	uint64_t queue_mask = 0;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}
4335
4336 r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
4337 if (r) {
4338 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4339 return r;
4340 }
4341 /* set resources */
4342 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
4343 amdgpu_ring_write(kiq_ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */
4344 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
4345 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
4346 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
4347 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
4348 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
4349 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
4350 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4351 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4352 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
4353 uint64_t wptr_addr = ring->wptr_gpu_addr;
4354
4355 /* map queues */
4356 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
4357		/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
4358 amdgpu_ring_write(kiq_ring,
4359 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
4360 amdgpu_ring_write(kiq_ring,
4361 PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
4362 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
4363 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
4364 PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
4365 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
4366 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
4367 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
4368 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4369 }
4370
4371 amdgpu_ring_commit(kiq_ring);
4372
4373 return 0;
4374}
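
/*
 * Packet budget sketch for the ring allocation above: SET_RESOURCES
 * occupies 8 dwords (PACKET3 header + 7 payload) and each MAP_QUEUES
 * occupies 7 dwords (header + 6 payload), so (8 * num_compute_rings) + 8
 * covers every packet with one spare dword per compute ring.
 */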
4375
4376static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
4377{
4378 int i, r = 0;
4379
4380 if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4381 WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
4382 for (i = 0; i < adev->usec_timeout; i++) {
4383 if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4384 break;
4385 udelay(1);
4386 }
4387 if (i == adev->usec_timeout)
4388 r = -ETIMEDOUT;
4389 }
4390 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
4391 WREG32(mmCP_HQD_PQ_RPTR, 0);
4392 WREG32(mmCP_HQD_PQ_WPTR, 0);
4393
4394 return r;
4395}
4396
4397static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4398{
4399 struct amdgpu_device *adev = ring->adev;
4400
4401 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4402 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
4403 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4404 mqd->cp_hqd_queue_priority =
4405 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4406 }
4407 }
4408}
4409
4410static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4411{
4412 struct amdgpu_device *adev = ring->adev;
4413 struct vi_mqd *mqd = ring->mqd_ptr;
4414 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4415 uint32_t tmp;
4416
4417 mqd->header = 0xC0310800;
4418 mqd->compute_pipelinestat_enable = 0x00000001;
4419 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4420 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4421 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4422 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4423 mqd->compute_misc_reserved = 0x00000003;
4424 mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
4425 + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4426 mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
4427 + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4428 eop_base_addr = ring->eop_gpu_addr >> 8;
4429 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4430 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4431
4432 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4433 tmp = RREG32(mmCP_HQD_EOP_CONTROL);
4434 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4435 (order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
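	/*
	 * Worked example with the default GFX8_MEC_HPD_SIZE of 4096 bytes:
	 * 4096 / 4 = 1024 dwords, order_base_2(1024) = 10, so EOP_SIZE is
	 * programmed as 9 and the HW decodes 2^(9+1) = 1024 dwords.
	 */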
4436
4437 mqd->cp_hqd_eop_control = tmp;
4438
4439 /* enable doorbell? */
4440 tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4441 CP_HQD_PQ_DOORBELL_CONTROL,
4442 DOORBELL_EN,
4443 ring->use_doorbell ? 1 : 0);
4444
4445 mqd->cp_hqd_pq_doorbell_control = tmp;
4446
4447 /* set the pointer to the MQD */
4448 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
4449 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
4450
4451 /* set MQD vmid to 0 */
4452 tmp = RREG32(mmCP_MQD_CONTROL);
4453 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4454 mqd->cp_mqd_control = tmp;
4455
4456	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4457 hqd_gpu_addr = ring->gpu_addr >> 8;
4458 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4459 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4460
4461 /* set up the HQD, this is similar to CP_RB0_CNTL */
4462 tmp = RREG32(mmCP_HQD_PQ_CONTROL);
4463 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4464 (order_base_2(ring->ring_size / 4) - 1));
4465 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4466 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4467#ifdef __BIG_ENDIAN
4468 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
4469#endif
4470 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4471 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
4472 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4473 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4474 mqd->cp_hqd_pq_control = tmp;
4475
4476 /* set the wb address whether it's enabled or not */
4477 wb_gpu_addr = ring->rptr_gpu_addr;
4478 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4479 mqd->cp_hqd_pq_rptr_report_addr_hi =
4480 upper_32_bits(wb_gpu_addr) & 0xffff;
4481
4482 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4483 wb_gpu_addr = ring->wptr_gpu_addr;
4484 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4485 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4486
4487 tmp = 0;
4488 /* enable the doorbell if requested */
4489 if (ring->use_doorbell) {
4490 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
4491 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4492 DOORBELL_OFFSET, ring->doorbell_index);
4493
4494 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4495 DOORBELL_EN, 1);
4496 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4497 DOORBELL_SOURCE, 0);
4498 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4499 DOORBELL_HIT, 0);
4500 }
4501
4502 mqd->cp_hqd_pq_doorbell_control = tmp;
4503
4504 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4505 ring->wptr = 0;
4506 mqd->cp_hqd_pq_wptr = ring->wptr;
4507 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
4508
4509 /* set the vmid for the queue */
4510 mqd->cp_hqd_vmid = 0;
4511
4512 tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
4513 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
4514 mqd->cp_hqd_persistent_state = tmp;
4515
4516 /* set MTYPE */
4517 tmp = RREG32(mmCP_HQD_IB_CONTROL);
4518 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4519 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
4520 mqd->cp_hqd_ib_control = tmp;
4521
4522 tmp = RREG32(mmCP_HQD_IQ_TIMER);
4523 tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
4524 mqd->cp_hqd_iq_timer = tmp;
4525
4526 tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
4527 tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
4528 mqd->cp_hqd_ctx_save_control = tmp;
4529
4530 /* defaults */
4531 mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
4532 mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
4533 mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
4534 mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
4535 mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
4536 mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
4537 mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
4538 mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
4539 mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
4540 mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
4541 mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
4542 mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
4543
4544 /* set static priority for a queue/ring */
4545 gfx_v8_0_mqd_set_priority(ring, mqd);
4546 mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
4547
4548	/* The map_queues packet doesn't need to activate the queue,
4549	 * so only the KIQ needs to set this field.
4550 */
4551 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
4552 mqd->cp_hqd_active = 1;
4553
4554 return 0;
4555}
4556
4557static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
4558 struct vi_mqd *mqd)
4559{
4560 uint32_t mqd_reg;
4561 uint32_t *mqd_data;
4562
4563 /* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
4564 mqd_data = &mqd->cp_mqd_base_addr_lo;
4565
4566 /* disable wptr polling */
4567 WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
4568
4569 /* program all HQD registers */
4570 for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
4571 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4572
4573 /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
4574 * This is safe since EOP RPTR==WPTR for any inactive HQD
4575 * on ASICs that do not support context-save.
4576 * EOP writes/reads can start anywhere in the ring.
4577 */
4578 if (adev->asic_type != CHIP_TONGA) {
4579 WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
4580 WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
4581 WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
4582 }
4583
4584 for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
4585 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4586
4587 /* activate the HQD */
4588 for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
4589 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4590
4591 return 0;
4592}
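
/*
 * The commit loops above rely on struct vi_mqd mirroring the HQD
 * register file: fields from cp_mqd_base_addr_lo onwards are laid out
 * in register order, so mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR] yields
 * the struct entry for register mqd_reg.  A sketch of the invariant
 * (illustrative, not compiled here):
 *
 *   WARN_ON(&mqd->cp_hqd_active - &mqd->cp_mqd_base_addr_lo !=
 *           mmCP_HQD_ACTIVE - mmCP_MQD_BASE_ADDR);
 */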
4593
4594static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
4595{
4596 struct amdgpu_device *adev = ring->adev;
4597 struct vi_mqd *mqd = ring->mqd_ptr;
4598
4599 gfx_v8_0_kiq_setting(ring);
4600
4601 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4602 /* reset MQD to a clean status */
4603 if (adev->gfx.kiq[0].mqd_backup)
4604 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));
4605
4606 /* reset ring buffer */
4607 ring->wptr = 0;
4608 amdgpu_ring_clear_ring(ring);
4609 mutex_lock(&adev->srbm_mutex);
4610 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4611 gfx_v8_0_mqd_commit(adev, mqd);
4612 vi_srbm_select(adev, 0, 0, 0, 0);
4613 mutex_unlock(&adev->srbm_mutex);
4614 } else {
4615 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4616 ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4617 ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4618 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4619 amdgpu_ring_clear_ring(ring);
4620 mutex_lock(&adev->srbm_mutex);
4621 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4622 gfx_v8_0_mqd_init(ring);
4623 gfx_v8_0_mqd_commit(adev, mqd);
4624 vi_srbm_select(adev, 0, 0, 0, 0);
4625 mutex_unlock(&adev->srbm_mutex);
4626
4627 if (adev->gfx.kiq[0].mqd_backup)
4628 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
4629 }
4630
4631 return 0;
4632}
4633
4634static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4635{
4636 struct amdgpu_device *adev = ring->adev;
4637 struct vi_mqd *mqd = ring->mqd_ptr;
4638 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4639
4640 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4641 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4642 ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4643 ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4644 mutex_lock(&adev->srbm_mutex);
4645 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4646 gfx_v8_0_mqd_init(ring);
4647 vi_srbm_select(adev, 0, 0, 0, 0);
4648 mutex_unlock(&adev->srbm_mutex);
4649
4650 if (adev->gfx.mec.mqd_backup[mqd_idx])
4651 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4652 } else {
4653 /* restore MQD to a clean status */
4654 if (adev->gfx.mec.mqd_backup[mqd_idx])
4655 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4656 /* reset ring buffer */
4657 ring->wptr = 0;
4658 amdgpu_ring_clear_ring(ring);
4659 }
4660 return 0;
4661}
4662
4663static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
4664{
4665 if (adev->asic_type > CHIP_TONGA) {
4666 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
4667 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
4668 }
4669 /* enable doorbells */
4670 WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4671}
4672
4673static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4674{
4675 struct amdgpu_ring *ring;
4676 int r;
4677
4678 ring = &adev->gfx.kiq[0].ring;
4679
4680 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4681 if (unlikely(r != 0))
4682 return r;
4683
4684 r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4685 if (unlikely(r != 0)) {
4686 amdgpu_bo_unreserve(ring->mqd_obj);
4687 return r;
4688 }
4689
4690 gfx_v8_0_kiq_init_queue(ring);
4691 amdgpu_bo_kunmap(ring->mqd_obj);
4692 ring->mqd_ptr = NULL;
4693 amdgpu_bo_unreserve(ring->mqd_obj);
4694 return 0;
4695}
4696
4697static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4698{
4699 struct amdgpu_ring *ring = NULL;
4700 int r = 0, i;
4701
4702 gfx_v8_0_cp_compute_enable(adev, true);
4703
4704 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4705 ring = &adev->gfx.compute_ring[i];
4706
4707 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4708 if (unlikely(r != 0))
4709 goto done;
4710 r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4711 if (!r) {
4712 r = gfx_v8_0_kcq_init_queue(ring);
4713 amdgpu_bo_kunmap(ring->mqd_obj);
4714 ring->mqd_ptr = NULL;
4715 }
4716 amdgpu_bo_unreserve(ring->mqd_obj);
4717 if (r)
4718 goto done;
4719 }
4720
4721 gfx_v8_0_set_mec_doorbell_range(adev);
4722
4723 r = gfx_v8_0_kiq_kcq_enable(adev);
4724 if (r)
4725 goto done;
4726
4727done:
4728 return r;
4729}
4730
4731static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4732{
4733 int r, i;
4734 struct amdgpu_ring *ring;
4735
4736 /* collect all the ring_tests here, gfx, kiq, compute */
4737 ring = &adev->gfx.gfx_ring[0];
4738 r = amdgpu_ring_test_helper(ring);
4739 if (r)
4740 return r;
4741
4742 ring = &adev->gfx.kiq[0].ring;
4743 r = amdgpu_ring_test_helper(ring);
4744 if (r)
4745 return r;
4746
4747 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4748 ring = &adev->gfx.compute_ring[i];
4749 amdgpu_ring_test_helper(ring);
4750 }
4751
4752 return 0;
4753}
4754
4755static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4756{
4757 int r;
4758
4759 if (!(adev->flags & AMD_IS_APU))
4760 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4761
4762 r = gfx_v8_0_kiq_resume(adev);
4763 if (r)
4764 return r;
4765
4766 r = gfx_v8_0_cp_gfx_resume(adev);
4767 if (r)
4768 return r;
4769
4770 r = gfx_v8_0_kcq_resume(adev);
4771 if (r)
4772 return r;
4773
4774 r = gfx_v8_0_cp_test_all_rings(adev);
4775 if (r)
4776 return r;
4777
4778 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4779
4780 return 0;
4781}
4782
4783static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
4784{
4785 gfx_v8_0_cp_gfx_enable(adev, enable);
4786 gfx_v8_0_cp_compute_enable(adev, enable);
4787}
4788
4789static int gfx_v8_0_hw_init(void *handle)
4790{
4791 int r;
4792 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4793
4794 gfx_v8_0_init_golden_registers(adev);
4795 gfx_v8_0_constants_init(adev);
4796
4797 r = adev->gfx.rlc.funcs->resume(adev);
4798 if (r)
4799 return r;
4800
4801 r = gfx_v8_0_cp_resume(adev);
4802
4803 return r;
4804}
4805
4806static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
4807{
4808 int r, i;
4809 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
4810
4811 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4812 if (r)
4813 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4814
4815 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4816 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4817
4818 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
4819 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
4820 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
4821 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
4822 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
4823 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
4824 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
4825 amdgpu_ring_write(kiq_ring, 0);
4826 amdgpu_ring_write(kiq_ring, 0);
4827 amdgpu_ring_write(kiq_ring, 0);
4828 }
4829 r = amdgpu_ring_test_helper(kiq_ring);
4830 if (r)
4831 DRM_ERROR("KCQ disable failed\n");
4832
4833 return r;
4834}
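
/*
 * Sizing note for the allocation above: each UNMAP_QUEUES packet is
 * 6 dwords (PACKET3 header + 5 payload: the action/engine word, the
 * doorbell offset, then three reserved zero dwords), which matches the
 * 6 * num_compute_rings dwords reserved on the KIQ ring.
 */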
4835
4836static bool gfx_v8_0_is_idle(void *handle)
4837{
4838 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4839
4840 if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4841 || RREG32(mmGRBM_STATUS2) != 0x8)
4842 return false;
4843 else
4844 return true;
4845}
4846
4847static bool gfx_v8_0_rlc_is_idle(void *handle)
4848{
4849 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4850
4851 if (RREG32(mmGRBM_STATUS2) != 0x8)
4852 return false;
4853 else
4854 return true;
4855}
4856
4857static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4858{
4859 unsigned int i;
4860 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4861
4862 for (i = 0; i < adev->usec_timeout; i++) {
4863 if (gfx_v8_0_rlc_is_idle(handle))
4864 return 0;
4865
4866 udelay(1);
4867 }
4868 return -ETIMEDOUT;
4869}
4870
4871static int gfx_v8_0_wait_for_idle(void *handle)
4872{
4873 unsigned int i;
4874 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4875
4876 for (i = 0; i < adev->usec_timeout; i++) {
4877 if (gfx_v8_0_is_idle(handle))
4878 return 0;
4879
4880 udelay(1);
4881 }
4882 return -ETIMEDOUT;
4883}
4884
4885static int gfx_v8_0_hw_fini(void *handle)
4886{
4887 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4888
4889 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4890 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4891
4892 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4893
4894 amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
4895
4896	/* disable KCQ so the CPC stops touching memory that may no longer be valid */
4897 gfx_v8_0_kcq_disable(adev);
4898
4899 if (amdgpu_sriov_vf(adev)) {
4900 pr_debug("For SRIOV client, shouldn't do anything.\n");
4901 return 0;
4902 }
4903 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4904 if (!gfx_v8_0_wait_for_idle(adev))
4905 gfx_v8_0_cp_enable(adev, false);
4906 else
4907		pr_err("cp is busy, skipping cp halt\n");
4908 if (!gfx_v8_0_wait_for_rlc_idle(adev))
4909 adev->gfx.rlc.funcs->stop(adev);
4910 else
4911		pr_err("rlc is busy, skipping rlc halt\n");
4912 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4913
4914 return 0;
4915}
4916
4917static int gfx_v8_0_suspend(void *handle)
4918{
4919 return gfx_v8_0_hw_fini(handle);
4920}
4921
4922static int gfx_v8_0_resume(void *handle)
4923{
4924 return gfx_v8_0_hw_init(handle);
4925}
4926
4927static bool gfx_v8_0_check_soft_reset(void *handle)
4928{
4929 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4930 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4931 u32 tmp;
4932
4933 /* GRBM_STATUS */
4934 tmp = RREG32(mmGRBM_STATUS);
4935 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4936 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4937 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4938 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4939 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4940 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
4941 GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4942 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4943 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4944 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4945 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4946 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4947 SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4948 }
4949
4950 /* GRBM_STATUS2 */
4951 tmp = RREG32(mmGRBM_STATUS2);
4952 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4953 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4954 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4955
4956 if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
4957 REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
4958 REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
4959 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4960 SOFT_RESET_CPF, 1);
4961 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4962 SOFT_RESET_CPC, 1);
4963 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4964 SOFT_RESET_CPG, 1);
4965 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
4966 SOFT_RESET_GRBM, 1);
4967 }
4968
4969 /* SRBM_STATUS */
4970 tmp = RREG32(mmSRBM_STATUS);
4971 if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
4972 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4973 SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4974 if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
4975 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4976 SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
4977
4978 if (grbm_soft_reset || srbm_soft_reset) {
4979 adev->gfx.grbm_soft_reset = grbm_soft_reset;
4980 adev->gfx.srbm_soft_reset = srbm_soft_reset;
4981 return true;
4982 } else {
4983 adev->gfx.grbm_soft_reset = 0;
4984 adev->gfx.srbm_soft_reset = 0;
4985 return false;
4986 }
4987}
4988
4989static int gfx_v8_0_pre_soft_reset(void *handle)
4990{
4991 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4992 u32 grbm_soft_reset = 0;
4993
4994 if ((!adev->gfx.grbm_soft_reset) &&
4995 (!adev->gfx.srbm_soft_reset))
4996 return 0;
4997
4998 grbm_soft_reset = adev->gfx.grbm_soft_reset;
4999
5000 /* stop the rlc */
5001 adev->gfx.rlc.funcs->stop(adev);
5002
5003 if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5004 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5005 /* Disable GFX parsing/prefetching */
5006 gfx_v8_0_cp_gfx_enable(adev, false);
5007
5008 if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5009 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5010 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5011 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5012 int i;
5013
5014 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5015 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5016
5017 mutex_lock(&adev->srbm_mutex);
5018 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5019 gfx_v8_0_deactivate_hqd(adev, 2);
5020 vi_srbm_select(adev, 0, 0, 0, 0);
5021 mutex_unlock(&adev->srbm_mutex);
5022 }
5023 /* Disable MEC parsing/prefetching */
5024 gfx_v8_0_cp_compute_enable(adev, false);
5025 }
5026
5027 return 0;
5028}
5029
5030static int gfx_v8_0_soft_reset(void *handle)
5031{
5032 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5033 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
5034 u32 tmp;
5035
5036 if ((!adev->gfx.grbm_soft_reset) &&
5037 (!adev->gfx.srbm_soft_reset))
5038 return 0;
5039
5040 grbm_soft_reset = adev->gfx.grbm_soft_reset;
5041 srbm_soft_reset = adev->gfx.srbm_soft_reset;
5042
5043 if (grbm_soft_reset || srbm_soft_reset) {
5044 tmp = RREG32(mmGMCON_DEBUG);
5045 tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
5046 tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
5047 WREG32(mmGMCON_DEBUG, tmp);
5048 udelay(50);
5049 }
5050
5051 if (grbm_soft_reset) {
5052 tmp = RREG32(mmGRBM_SOFT_RESET);
5053 tmp |= grbm_soft_reset;
5054 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
5055 WREG32(mmGRBM_SOFT_RESET, tmp);
5056 tmp = RREG32(mmGRBM_SOFT_RESET);
5057
5058 udelay(50);
5059
5060 tmp &= ~grbm_soft_reset;
5061 WREG32(mmGRBM_SOFT_RESET, tmp);
5062 tmp = RREG32(mmGRBM_SOFT_RESET);
5063 }
5064
5065 if (srbm_soft_reset) {
5066 tmp = RREG32(mmSRBM_SOFT_RESET);
5067 tmp |= srbm_soft_reset;
5068 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
5069 WREG32(mmSRBM_SOFT_RESET, tmp);
5070 tmp = RREG32(mmSRBM_SOFT_RESET);
5071
5072 udelay(50);
5073
5074 tmp &= ~srbm_soft_reset;
5075 WREG32(mmSRBM_SOFT_RESET, tmp);
5076 tmp = RREG32(mmSRBM_SOFT_RESET);
5077 }
5078
5079 if (grbm_soft_reset || srbm_soft_reset) {
5080 tmp = RREG32(mmGMCON_DEBUG);
5081 tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
5082 tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
5083 WREG32(mmGMCON_DEBUG, tmp);
5084 }
5085
5086 /* Wait a little for things to settle down */
5087 udelay(50);
5088
5089 return 0;
5090}
5091
5092static int gfx_v8_0_post_soft_reset(void *handle)
5093{
5094 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5095 u32 grbm_soft_reset = 0;
5096
5097 if ((!adev->gfx.grbm_soft_reset) &&
5098 (!adev->gfx.srbm_soft_reset))
5099 return 0;
5100
5101 grbm_soft_reset = adev->gfx.grbm_soft_reset;
5102
5103 if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5104 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5105 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5106 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5107 int i;
5108
5109 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5110 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5111
5112 mutex_lock(&adev->srbm_mutex);
5113 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5114 gfx_v8_0_deactivate_hqd(adev, 2);
5115 vi_srbm_select(adev, 0, 0, 0, 0);
5116 mutex_unlock(&adev->srbm_mutex);
5117 }
5118 gfx_v8_0_kiq_resume(adev);
5119 gfx_v8_0_kcq_resume(adev);
5120 }
5121
5122 if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5123 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5124 gfx_v8_0_cp_gfx_resume(adev);
5125
5126 gfx_v8_0_cp_test_all_rings(adev);
5127
5128 adev->gfx.rlc.funcs->start(adev);
5129
5130 return 0;
5131}
5132
5133/**
5134 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5135 *
5136 * @adev: amdgpu_device pointer
5137 *
5138 * Fetches a GPU clock counter snapshot.
5139 * Returns the 64 bit clock counter snapshot.
5140 */
5141static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5142{
5143 uint64_t clock;
5144
5145 mutex_lock(&adev->gfx.gpu_clock_mutex);
5146 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
5147 clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5148 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5149 mutex_unlock(&adev->gfx.gpu_clock_mutex);
5150 return clock;
5151}
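
/*
 * The write to mmRLC_CAPTURE_GPU_CLOCK_COUNT latches the running
 * counter so that the subsequent LSB/MSB reads form one coherent
 * 64-bit sample; gpu_clock_mutex keeps concurrent callers from racing
 * on the shared capture register.
 */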
5152
5153static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5154 uint32_t vmid,
5155 uint32_t gds_base, uint32_t gds_size,
5156 uint32_t gws_base, uint32_t gws_size,
5157 uint32_t oa_base, uint32_t oa_size)
5158{
5159 /* GDS Base */
5160 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5161 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5162 WRITE_DATA_DST_SEL(0)));
5163 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
5164 amdgpu_ring_write(ring, 0);
5165 amdgpu_ring_write(ring, gds_base);
5166
5167 /* GDS Size */
5168 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5169 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5170 WRITE_DATA_DST_SEL(0)));
5171 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
5172 amdgpu_ring_write(ring, 0);
5173 amdgpu_ring_write(ring, gds_size);
5174
5175 /* GWS */
5176 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5177 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5178 WRITE_DATA_DST_SEL(0)));
5179 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
5180 amdgpu_ring_write(ring, 0);
5181 amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5182
5183 /* OA */
5184 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5185 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5186 WRITE_DATA_DST_SEL(0)));
5187 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
5188 amdgpu_ring_write(ring, 0);
5189 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
5190}
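
/*
 * OA allocation math (illustrative): (1 << (oa_size + oa_base)) -
 * (1 << oa_base) builds a field of oa_size consecutive bits starting
 * at bit oa_base, e.g. oa_base = 4, oa_size = 2 gives 0x40 - 0x10 =
 * 0x30, i.e. bits 4 and 5 set.
 */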
5191
5192static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5193{
5194 WREG32(mmSQ_IND_INDEX,
5195 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5196 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5197 (address << SQ_IND_INDEX__INDEX__SHIFT) |
5198 (SQ_IND_INDEX__FORCE_READ_MASK));
5199 return RREG32(mmSQ_IND_DATA);
5200}
5201
5202static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5203 uint32_t wave, uint32_t thread,
5204 uint32_t regno, uint32_t num, uint32_t *out)
5205{
5206 WREG32(mmSQ_IND_INDEX,
5207 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5208 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5209 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
5210 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5211 (SQ_IND_INDEX__FORCE_READ_MASK) |
5212 (SQ_IND_INDEX__AUTO_INCR_MASK));
5213 while (num--)
5214 *(out++) = RREG32(mmSQ_IND_DATA);
5215}
5216
5217static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
5218{
5219 /* type 0 wave data */
5220 dst[(*no_fields)++] = 0;
5221 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
5222 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
5223 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
5224 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
5225 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
5226 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
5227 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
5228 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
5229 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
5230 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
5231 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
5232 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
5233 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
5234 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
5235 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
5236 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
5237 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
5238 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
5239 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
5240}
5241
5242static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
5243 uint32_t wave, uint32_t start,
5244 uint32_t size, uint32_t *dst)
5245{
5246 wave_read_regs(
5247 adev, simd, wave, 0,
5248 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
5249}
5250
5251
5252static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5253 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5254 .select_se_sh = &gfx_v8_0_select_se_sh,
5255 .read_wave_data = &gfx_v8_0_read_wave_data,
5256 .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5257 .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5258};
5259
5260static int gfx_v8_0_early_init(void *handle)
5261{
5262 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5263
5264 adev->gfx.xcc_mask = 1;
5265 adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5266 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5267 AMDGPU_MAX_COMPUTE_RINGS);
5268 adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5269 gfx_v8_0_set_ring_funcs(adev);
5270 gfx_v8_0_set_irq_funcs(adev);
5271 gfx_v8_0_set_gds_init(adev);
5272 gfx_v8_0_set_rlc_funcs(adev);
5273
5274 return 0;
5275}
5276
5277static int gfx_v8_0_late_init(void *handle)
5278{
5279 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5280 int r;
5281
5282 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5283 if (r)
5284 return r;
5285
5286 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5287 if (r)
5288 return r;
5289
5290 /* requires IBs so do in late init after IB pool is initialized */
5291 r = gfx_v8_0_do_edc_gpr_workarounds(adev);
5292 if (r)
5293 return r;
5294
5295 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
5296 if (r) {
5297 DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
5298 return r;
5299 }
5300
5301 r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
5302 if (r) {
5303 DRM_ERROR(
5304 "amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
5305 r);
5306 return r;
5307 }
5308
5309 return 0;
5310}
5311
5312static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5313 bool enable)
5314{
5315 if ((adev->asic_type == CHIP_POLARIS11) ||
5316 (adev->asic_type == CHIP_POLARIS12) ||
5317 (adev->asic_type == CHIP_VEGAM))
5318 /* Send msg to SMU via Powerplay */
5319 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5320
5321 WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5322}
5323
5324static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
5325 bool enable)
5326{
5327 WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
5328}
5329
5330static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
5331 bool enable)
5332{
5333 WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
5334}
5335
5336static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
5337 bool enable)
5338{
5339 WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
5340}
5341
5342static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
5343 bool enable)
5344{
5345 WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
5346
5347 /* Read any GFX register to wake up GFX. */
5348 if (!enable)
5349 RREG32(mmDB_RENDER_CONTROL);
5350}
5351
5352static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5353 bool enable)
5354{
5355 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5356 cz_enable_gfx_cg_power_gating(adev, true);
5357 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5358 cz_enable_gfx_pipeline_power_gating(adev, true);
5359 } else {
5360 cz_enable_gfx_cg_power_gating(adev, false);
5361 cz_enable_gfx_pipeline_power_gating(adev, false);
5362 }
5363}
5364
5365static int gfx_v8_0_set_powergating_state(void *handle,
5366 enum amd_powergating_state state)
5367{
5368 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5369 bool enable = (state == AMD_PG_STATE_GATE);
5370
5371 if (amdgpu_sriov_vf(adev))
5372 return 0;
5373
5374 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5375 AMD_PG_SUPPORT_RLC_SMU_HS |
5376 AMD_PG_SUPPORT_CP |
5377 AMD_PG_SUPPORT_GFX_DMG))
5378 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5379 switch (adev->asic_type) {
5380 case CHIP_CARRIZO:
5381 case CHIP_STONEY:
5382
5383 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5384 cz_enable_sck_slow_down_on_power_up(adev, true);
5385 cz_enable_sck_slow_down_on_power_down(adev, true);
5386 } else {
5387 cz_enable_sck_slow_down_on_power_up(adev, false);
5388 cz_enable_sck_slow_down_on_power_down(adev, false);
5389 }
5390 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5391 cz_enable_cp_power_gating(adev, true);
5392 else
5393 cz_enable_cp_power_gating(adev, false);
5394
5395 cz_update_gfx_cg_power_gating(adev, enable);
5396
5397 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5398 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5399 else
5400 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5401
5402 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5403 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5404 else
5405 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5406 break;
5407 case CHIP_POLARIS11:
5408 case CHIP_POLARIS12:
5409 case CHIP_VEGAM:
5410 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5411 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5412 else
5413 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5414
5415 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5416 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5417 else
5418 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5419
5420 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
5421 polaris11_enable_gfx_quick_mg_power_gating(adev, true);
5422 else
5423 polaris11_enable_gfx_quick_mg_power_gating(adev, false);
5424 break;
5425 default:
5426 break;
5427 }
5428 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5429 AMD_PG_SUPPORT_RLC_SMU_HS |
5430 AMD_PG_SUPPORT_CP |
5431 AMD_PG_SUPPORT_GFX_DMG))
5432 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5433 return 0;
5434}
5435
5436static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
5437{
5438 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5439 int data;
5440
5441 if (amdgpu_sriov_vf(adev))
5442 *flags = 0;
5443
5444 /* AMD_CG_SUPPORT_GFX_MGCG */
5445 data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5446 if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
5447 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5448
5449	/* AMD_CG_SUPPORT_GFX_CGCG */
5450 data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5451 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5452 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5453
5454 /* AMD_CG_SUPPORT_GFX_CGLS */
5455 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5456 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5457
5458 /* AMD_CG_SUPPORT_GFX_CGTS */
5459 data = RREG32(mmCGTS_SM_CTRL_REG);
5460 if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
5461 *flags |= AMD_CG_SUPPORT_GFX_CGTS;
5462
5463 /* AMD_CG_SUPPORT_GFX_CGTS_LS */
5464 if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
5465 *flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
5466
5467 /* AMD_CG_SUPPORT_GFX_RLC_LS */
5468 data = RREG32(mmRLC_MEM_SLP_CNTL);
5469 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5470 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5471
5472 /* AMD_CG_SUPPORT_GFX_CP_LS */
5473 data = RREG32(mmCP_MEM_SLP_CNTL);
5474 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5475 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5476}
5477
5478static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
5479 uint32_t reg_addr, uint32_t cmd)
5480{
5481 uint32_t data;
5482
5483 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
5484
5485 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5486 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5487
5488 data = RREG32(mmRLC_SERDES_WR_CTRL);
5489 if (adev->asic_type == CHIP_STONEY)
5490 data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5491 RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5492 RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5493 RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5494 RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5495 RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5496 RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5497 RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5498 RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5499 else
5500 data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5501 RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5502 RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5503 RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5504 RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5505 RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5506 RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5507 RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5508 RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
5509 RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
5510 RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5511 data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
5512 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
5513 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
5514 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
5515
5516 WREG32(mmRLC_SERDES_WR_CTRL, data);
5517}
5518
5519#define MSG_ENTER_RLC_SAFE_MODE 1
5520#define MSG_EXIT_RLC_SAFE_MODE 0
5521#define RLC_GPR_REG2__REQ_MASK 0x00000001
5522#define RLC_GPR_REG2__REQ__SHIFT 0
5523#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
5524#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5525
5526static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5527{
5528 uint32_t rlc_setting;
5529
5530 rlc_setting = RREG32(mmRLC_CNTL);
5531 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5532 return false;
5533
5534 return true;
5535}
5536
5537static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
5538{
5539 uint32_t data;
5540 unsigned i;
5541 data = RREG32(mmRLC_CNTL);
5542 data |= RLC_SAFE_MODE__CMD_MASK;
5543 data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5544 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5545 WREG32(mmRLC_SAFE_MODE, data);
5546
5547 /* wait for RLC_SAFE_MODE */
5548 for (i = 0; i < adev->usec_timeout; i++) {
5549 if ((RREG32(mmRLC_GPM_STAT) &
5550 (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5551 RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
5552 (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5553 RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
5554 break;
5555 udelay(1);
5556 }
5557 for (i = 0; i < adev->usec_timeout; i++) {
5558 if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5559 break;
5560 udelay(1);
5561 }
5562}
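
/*
 * Handshake summary for the safe-mode request above: MESSAGE = 1 asks
 * the RLC to enter safe mode and CMD arms the request; the first poll
 * waits until RLC_GPM_STAT reports both GFX clocks and GFX power as
 * on, and the second waits for the RLC to acknowledge by clearing the
 * CMD bit.
 */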
5563
5564static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
5565{
5566 uint32_t data;
5567 unsigned i;
5568
5569 data = RREG32(mmRLC_CNTL);
5570 data |= RLC_SAFE_MODE__CMD_MASK;
5571 data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5572 WREG32(mmRLC_SAFE_MODE, data);
5573
5574 for (i = 0; i < adev->usec_timeout; i++) {
5575 if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5576 break;
5577 udelay(1);
5578 }
5579}
5580
5581static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
5582{
5583 u32 data;
5584
5585 amdgpu_gfx_off_ctrl(adev, false);
5586
5587 if (amdgpu_sriov_is_pp_one_vf(adev))
5588 data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
5589 else
5590 data = RREG32(mmRLC_SPM_VMID);
5591
5592 data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
5593 data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
5594
5595 if (amdgpu_sriov_is_pp_one_vf(adev))
5596 WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
5597 else
5598 WREG32(mmRLC_SPM_VMID, data);
5599
5600 amdgpu_gfx_off_ctrl(adev, true);
5601}
5602
5603static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
5604 .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
5605 .set_safe_mode = gfx_v8_0_set_safe_mode,
5606 .unset_safe_mode = gfx_v8_0_unset_safe_mode,
5607 .init = gfx_v8_0_rlc_init,
5608 .get_csb_size = gfx_v8_0_get_csb_size,
5609 .get_csb_buffer = gfx_v8_0_get_csb_buffer,
5610 .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
5611 .resume = gfx_v8_0_rlc_resume,
5612 .stop = gfx_v8_0_rlc_stop,
5613 .reset = gfx_v8_0_rlc_reset,
5614 .start = gfx_v8_0_rlc_start,
5615 .update_spm_vmid = gfx_v8_0_update_spm_vmid
5616};
5617
5618static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5619 bool enable)
5620{
5621 uint32_t temp, data;
5622
5623 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5624
5625 /* It is disabled by HW by default */
5626 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
5627 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5628 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
5629 /* 1 - RLC memory Light sleep */
5630 WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
5631
5632 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
5633 WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
5634 }
5635
5636 /* 3 - RLC_CGTT_MGCG_OVERRIDE */
5637 temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5638 if (adev->flags & AMD_IS_APU)
5639 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5640 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5641 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
5642 else
5643 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5644 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5645 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5646 RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5647
5648 if (temp != data)
5649 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5650
5651 /* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5652 gfx_v8_0_wait_for_rlc_serdes(adev);
5653
5654 /* 5 - clear mgcg override */
5655 gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5656
5657 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
5658			/* 6 - Enable CGTS (Tree Shade) MGCG/MGLS */
5659 temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5660 data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
5661 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
5662 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
5663 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
5664 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
5665 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
5666 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
5667 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
5668 data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
5669 if (temp != data)
5670 WREG32(mmCGTS_SM_CTRL_REG, data);
5671 }
5672 udelay(50);
5673
5674 /* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5675 gfx_v8_0_wait_for_rlc_serdes(adev);
5676 } else {
5677 /* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
5678 temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5679 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5680 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5681 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5682 RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5683 if (temp != data)
5684 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5685
5686 /* 2 - disable MGLS in RLC */
5687 data = RREG32(mmRLC_MEM_SLP_CNTL);
5688 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5689 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5690 WREG32(mmRLC_MEM_SLP_CNTL, data);
5691 }
5692
5693 /* 3 - disable MGLS in CP */
5694 data = RREG32(mmCP_MEM_SLP_CNTL);
5695 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5696 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5697 WREG32(mmCP_MEM_SLP_CNTL, data);
5698 }
5699
5700 /* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
5701 temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5702 data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
5703 CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
5704 if (temp != data)
5705 WREG32(mmCGTS_SM_CTRL_REG, data);
5706
5707 /* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5708 gfx_v8_0_wait_for_rlc_serdes(adev);
5709
5710 /* 6 - set mgcg override */
5711 gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5712
5713 udelay(50);
5714
5715		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5716 gfx_v8_0_wait_for_rlc_serdes(adev);
5717 }
5718
5719 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5720}
5721
5722static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5723 bool enable)
5724{
5725 uint32_t temp, temp1, data, data1;
5726
5727 temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5728
5729 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5730
5731 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5732 temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5733 data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
5734 if (temp1 != data1)
5735 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5736
5737		/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5738 gfx_v8_0_wait_for_rlc_serdes(adev);
5739
5740 /* 2 - clear cgcg override */
5741 gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5742
5743 /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5744 gfx_v8_0_wait_for_rlc_serdes(adev);
5745
5746 /* 3 - write cmd to set CGLS */
5747 gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
5748
5749 /* 4 - enable cgcg */
5750 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5751
5752 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5753			/* enable cgls */
5754 data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5755
5756 temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5757 data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
5758
5759 if (temp1 != data1)
5760 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5761 } else {
5762 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5763 }
5764
5765 if (temp != data)
5766 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5767
5768		/* 5 - enable cntx_empty_int_enable/cntx_busy_int_enable/
5769 * Cmp_busy/GFX_Idle interrupts
5770 */
5771 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5772 } else {
5773 /* disable cntx_empty_int_enable & GFX Idle interrupt */
5774 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
5775
5776 /* TEST CGCG */
5777 temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5778 data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
5779 RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
5780 if (temp1 != data1)
5781 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5782
5783 /* read gfx register to wake up cgcg */
5784 RREG32(mmCB_CGTT_SCLK_CTRL);
5785 RREG32(mmCB_CGTT_SCLK_CTRL);
5786 RREG32(mmCB_CGTT_SCLK_CTRL);
5787 RREG32(mmCB_CGTT_SCLK_CTRL);
5788
5789 /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5790 gfx_v8_0_wait_for_rlc_serdes(adev);
5791
5792 /* write cmd to Set CGCG Override */
5793 gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5794
5795 /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5796 gfx_v8_0_wait_for_rlc_serdes(adev);
5797
5798 /* write cmd to Clear CGLS */
5799 gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
5800
5801 /* disable cgcg, cgls should be disabled too. */
5802 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
5803 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5804 if (temp != data)
5805 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5806 /* enable interrupts again for PG */
5807 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5808 }
5809
5810 gfx_v8_0_wait_for_rlc_serdes(adev);
5811
5812 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5813}

5814static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5815 bool enable)
5816{
5817 if (enable) {
5818 /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
5819 * === MGCG + MGLS + TS(CG/LS) ===
5820 */
5821 gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5822 gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5823 } else {
5824 /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
5825 * === CGCG + CGLS ===
5826 */
5827 gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5828 gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5829 }
5830 return 0;
5831}
5832
5833static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5834 enum amd_clockgating_state state)
5835{
5836 uint32_t msg_id, pp_state = 0;
5837 uint32_t pp_support_state = 0;
5838
5839 if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5840 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5841 pp_support_state = PP_STATE_SUPPORT_LS;
5842 pp_state = PP_STATE_LS;
5843 }
5844 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5845 pp_support_state |= PP_STATE_SUPPORT_CG;
5846 pp_state |= PP_STATE_CG;
5847 }
5848 if (state == AMD_CG_STATE_UNGATE)
5849 pp_state = 0;
5850
5851 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5852 PP_BLOCK_GFX_CG,
5853 pp_support_state,
5854 pp_state);
5855 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5856 }
5857
5858 if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5859 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5860 pp_support_state = PP_STATE_SUPPORT_LS;
5861 pp_state = PP_STATE_LS;
5862 }
5863
5864 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5865 pp_support_state |= PP_STATE_SUPPORT_CG;
5866 pp_state |= PP_STATE_CG;
5867 }
5868
5869 if (state == AMD_CG_STATE_UNGATE)
5870 pp_state = 0;
5871
5872 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5873 PP_BLOCK_GFX_MG,
5874 pp_support_state,
5875 pp_state);
5876 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5877 }
5878
5879 return 0;
5880}
5881
5882static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
5883 enum amd_clockgating_state state)
5884{
5885
5886 uint32_t msg_id, pp_state = 0;
5887 uint32_t pp_support_state = 0;
5888
5889 if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5890 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5891 pp_support_state = PP_STATE_SUPPORT_LS;
5892 pp_state = PP_STATE_LS;
5893 }
5894 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5895 pp_support_state |= PP_STATE_SUPPORT_CG;
5896 pp_state |= PP_STATE_CG;
5897 }
5898 if (state == AMD_CG_STATE_UNGATE)
5899 pp_state = 0;
5900
5901 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5902 PP_BLOCK_GFX_CG,
5903 pp_support_state,
5904 pp_state);
5905 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5906 }
5907
5908 if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
5909 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5910 pp_support_state = PP_STATE_SUPPORT_LS;
5911 pp_state = PP_STATE_LS;
5912 }
5913 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5914 pp_support_state |= PP_STATE_SUPPORT_CG;
5915 pp_state |= PP_STATE_CG;
5916 }
5917 if (state == AMD_CG_STATE_UNGATE)
5918 pp_state = 0;
5919
5920 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5921 PP_BLOCK_GFX_3D,
5922 pp_support_state,
5923 pp_state);
5924 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5925 }
5926
5927 if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5928 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5929 pp_support_state = PP_STATE_SUPPORT_LS;
5930 pp_state = PP_STATE_LS;
5931 }
5932
5933 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5934 pp_support_state |= PP_STATE_SUPPORT_CG;
5935 pp_state |= PP_STATE_CG;
5936 }
5937
5938 if (state == AMD_CG_STATE_UNGATE)
5939 pp_state = 0;
5940
5941 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5942 PP_BLOCK_GFX_MG,
5943 pp_support_state,
5944 pp_state);
5945 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5946 }
5947
5948 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
5949 pp_support_state = PP_STATE_SUPPORT_LS;
5950
5951 if (state == AMD_CG_STATE_UNGATE)
5952 pp_state = 0;
5953 else
5954 pp_state = PP_STATE_LS;
5955
5956 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5957 PP_BLOCK_GFX_RLC,
5958 pp_support_state,
5959 pp_state);
5960 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5961 }
5962
5963 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
5964 pp_support_state = PP_STATE_SUPPORT_LS;
5965
5966 if (state == AMD_CG_STATE_UNGATE)
5967 pp_state = 0;
5968 else
5969 pp_state = PP_STATE_LS;
5970 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5971 PP_BLOCK_GFX_CP,
5972 pp_support_state,
5973 pp_state);
5974 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5975 }
5976
5977 return 0;
5978}
5979
5980static int gfx_v8_0_set_clockgating_state(void *handle,
5981 enum amd_clockgating_state state)
5982{
5983 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5984
5985 if (amdgpu_sriov_vf(adev))
5986 return 0;
5987
5988 switch (adev->asic_type) {
5989 case CHIP_FIJI:
5990 case CHIP_CARRIZO:
5991 case CHIP_STONEY:
5992 gfx_v8_0_update_gfx_clock_gating(adev,
5993 state == AMD_CG_STATE_GATE);
5994 break;
5995 case CHIP_TONGA:
5996 gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
5997 break;
5998 case CHIP_POLARIS10:
5999 case CHIP_POLARIS11:
6000 case CHIP_POLARIS12:
6001 case CHIP_VEGAM:
6002 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6003 break;
6004 default:
6005 break;
6006 }
6007 return 0;
6008}
6009
6010static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
6011{
6012 return *ring->rptr_cpu_addr;
6013}
6014
6015static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
6016{
6017 struct amdgpu_device *adev = ring->adev;
6018
6019 if (ring->use_doorbell)
6020 /* XXX check if swapping is necessary on BE */
6021 return *ring->wptr_cpu_addr;
6022 else
6023 return RREG32(mmCP_RB0_WPTR);
6024}
6025
6026static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
6027{
6028 struct amdgpu_device *adev = ring->adev;
6029
6030 if (ring->use_doorbell) {
6031 /* XXX check if swapping is necessary on BE */
6032 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
6033 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6034 } else {
6035 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
6036 (void)RREG32(mmCP_RB0_WPTR);
6037 }
6038}
6039
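/*
 * Flush the HDP cache via a write/wait/write WAIT_REG_MEM sequence on
 * GPU_HDP_FLUSH_REQ/DONE. Each CP engine polls its own done bit: the gfx
 * ring uses CP0, while compute/KIQ select a CP2/CP6-based bit shifted by
 * the ring's pipe index.
 */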
6040static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
6041{
6042 u32 ref_and_mask, reg_mem_engine;
6043
6044 if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
6045 (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
6046 switch (ring->me) {
6047 case 1:
6048 ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
6049 break;
6050 case 2:
6051 ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
6052 break;
6053 default:
6054 return;
6055 }
6056 reg_mem_engine = 0;
6057 } else {
6058 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
6059 reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
6060 }
6061
6062 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6063 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
6064 WAIT_REG_MEM_FUNCTION(3) | /* == */
6065 reg_mem_engine));
6066 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
6067 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
6068 amdgpu_ring_write(ring, ref_and_mask);
6069 amdgpu_ring_write(ring, ref_and_mask);
6070 amdgpu_ring_write(ring, 0x20); /* poll interval */
6071}
6072
6073static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
6074{
6075 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6076 amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
6077 EVENT_INDEX(4));
6078
6079 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6080 amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
6081 EVENT_INDEX(0));
6082}
6083
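/*
 * Emit an indirect buffer on the gfx ring. CE IBs use the
 * INDIRECT_BUFFER_CONST variant; under SR-IOV, a preemptible DE IB is
 * additionally preceded by DE metadata so the IB can be resumed.
 */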
6084static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
6085 struct amdgpu_job *job,
6086 struct amdgpu_ib *ib,
6087 uint32_t flags)
6088{
6089 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6090 u32 header, control = 0;
6091
6092 if (ib->flags & AMDGPU_IB_FLAG_CE)
6093 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
6094 else
6095 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
6096
6097 control |= ib->length_dw | (vmid << 24);
6098
6099 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
6100 control |= INDIRECT_BUFFER_PRE_ENB(1);
6101
6102 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
6103 gfx_v8_0_ring_emit_de_meta(ring);
6104 }
6105
6106 amdgpu_ring_write(ring, header);
6107 amdgpu_ring_write(ring,
6108#ifdef __BIG_ENDIAN
6109 (2 << 0) |
6110#endif
6111 (ib->gpu_addr & 0xFFFFFFFC));
6112 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6113 amdgpu_ring_write(ring, control);
6114}
6115
6116static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
6117 struct amdgpu_job *job,
6118 struct amdgpu_ib *ib,
6119 uint32_t flags)
6120{
6121 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6122 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
6123
6124	/* Currently, there is a high likelihood of a wave ID mismatch
6125	 * between ME and GDS, leading to a HW deadlock, because ME generates
6126	 * different wave IDs than the GDS expects. This situation happens
6127 * randomly when at least 5 compute pipes use GDS ordered append.
6128 * The wave IDs generated by ME are also wrong after suspend/resume.
6129 * Those are probably bugs somewhere else in the kernel driver.
6130 *
6131 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
6132 * GDS to 0 for this ring (me/pipe).
6133 */
6134 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
6135 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
6136 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
6137 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
6138 }
6139
6140 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
6141 amdgpu_ring_write(ring,
6142#ifdef __BIG_ENDIAN
6143 (2 << 0) |
6144#endif
6145 (ib->gpu_addr & 0xFFFFFFFC));
6146 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6147 amdgpu_ring_write(ring, control);
6148}
6149
6150static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
6151 u64 seq, unsigned flags)
6152{
6153 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6154 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6155
6156 /* Workaround for cache flush problems. First send a dummy EOP
6157	 * event down the pipe with a seq value one below the real one.
6158 */
6159 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6160 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6161 EOP_TC_ACTION_EN |
6162 EOP_TC_WB_ACTION_EN |
6163 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6164 EVENT_INDEX(5)));
6165 amdgpu_ring_write(ring, addr & 0xfffffffc);
6166 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6167 DATA_SEL(1) | INT_SEL(0));
6168 amdgpu_ring_write(ring, lower_32_bits(seq - 1));
6169 amdgpu_ring_write(ring, upper_32_bits(seq - 1));
6170
6171 /* Then send the real EOP event down the pipe:
6172 * EVENT_WRITE_EOP - flush caches, send int */
6173 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6174 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6175 EOP_TC_ACTION_EN |
6176 EOP_TC_WB_ACTION_EN |
6177 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6178 EVENT_INDEX(5)));
6179 amdgpu_ring_write(ring, addr & 0xfffffffc);
6180 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6181 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6182 amdgpu_ring_write(ring, lower_32_bits(seq));
6183 amdgpu_ring_write(ring, upper_32_bits(seq));
6185}
6186
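/*
 * Emit a WAIT_REG_MEM that stalls the front end (PFP on gfx rings, ME on
 * compute) until the ring's fence location reaches sync_seq, i.e. until
 * all previously emitted fences on this ring have signalled.
 */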
6187static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
6188{
6189 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6190 uint32_t seq = ring->fence_drv.sync_seq;
6191 uint64_t addr = ring->fence_drv.gpu_addr;
6192
6193 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6194 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
6195 WAIT_REG_MEM_FUNCTION(3) | /* equal */
6196 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
6197 amdgpu_ring_write(ring, addr & 0xfffffffc);
6198 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
6199 amdgpu_ring_write(ring, seq);
6200 amdgpu_ring_write(ring, 0xffffffff);
6201 amdgpu_ring_write(ring, 4); /* poll interval */
6202}
6203
6204static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
6205 unsigned vmid, uint64_t pd_addr)
6206{
6207 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6208
6209 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
6210
6211 /* wait for the invalidate to complete */
6212 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6213 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
6214 WAIT_REG_MEM_FUNCTION(0) | /* always */
6215 WAIT_REG_MEM_ENGINE(0))); /* me */
6216 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
6217 amdgpu_ring_write(ring, 0);
6218 amdgpu_ring_write(ring, 0); /* ref */
6219 amdgpu_ring_write(ring, 0); /* mask */
6220 amdgpu_ring_write(ring, 0x20); /* poll interval */
6221
6222 /* compute doesn't have PFP */
6223 if (usepfp) {
6224 /* sync PFP to ME, otherwise we might get invalid PFP reads */
6225 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
6226 amdgpu_ring_write(ring, 0x0);
6227 }
6228}
6229
6230static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
6231{
6232 return *ring->wptr_cpu_addr;
6233}
6234
6235static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6236{
6237 struct amdgpu_device *adev = ring->adev;
6238
6239 /* XXX check if swapping is necessary on BE */
6240 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
6241 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6242}
6243
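/*
 * Compute queues signal fences with a RELEASE_MEM packet rather than
 * EVENT_WRITE_EOP; it performs the same cache flush/write-back actions
 * and can optionally raise an interrupt.
 */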
6244static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
6245 u64 addr, u64 seq,
6246 unsigned flags)
6247{
6248 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6249 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6250
6251 /* RELEASE_MEM - flush caches, send int */
6252 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
6253 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6254 EOP_TC_ACTION_EN |
6255 EOP_TC_WB_ACTION_EN |
6256 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6257 EVENT_INDEX(5)));
6258 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6259 amdgpu_ring_write(ring, addr & 0xfffffffc);
6260 amdgpu_ring_write(ring, upper_32_bits(addr));
6261 amdgpu_ring_write(ring, lower_32_bits(seq));
6262 amdgpu_ring_write(ring, upper_32_bits(seq));
6263}
6264
6265static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
6266 u64 seq, unsigned int flags)
6267{
6268	/* we only allocate 32 bits for each fence seq writeback address */
6269 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
6270
6271 /* write fence seq to the "addr" */
6272 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6273 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6274 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6275 amdgpu_ring_write(ring, lower_32_bits(addr));
6276 amdgpu_ring_write(ring, upper_32_bits(addr));
6277 amdgpu_ring_write(ring, lower_32_bits(seq));
6278
6279 if (flags & AMDGPU_FENCE_FLAG_INT) {
6280 /* set register to trigger INT */
6281 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6282 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6283 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6284 amdgpu_ring_write(ring, mmCPC_INT_STATUS);
6285 amdgpu_ring_write(ring, 0);
6286 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6287 }
6288}
6289
6290static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
6291{
6292 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
6293 amdgpu_ring_write(ring, 0);
6294}
6295
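/*
 * CONTEXT_CONTROL: dw2 tells the CP which categories of state to (re)load
 * on a context switch; the individual load_* bits are assembled below.
 */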
6296static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
6297{
6298 uint32_t dw2 = 0;
6299
6300 if (amdgpu_sriov_vf(ring->adev))
6301 gfx_v8_0_ring_emit_ce_meta(ring);
6302
6303	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
6304 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
6305 gfx_v8_0_ring_emit_vgt_flush(ring);
6306 /* set load_global_config & load_global_uconfig */
6307 dw2 |= 0x8001;
6308 /* set load_cs_sh_regs */
6309 dw2 |= 0x01000000;
6310 /* set load_per_context_state & load_gfx_sh_regs for GFX */
6311 dw2 |= 0x10002;
6312
6313		/* set load_ce_ram if a preamble is present */
6314 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
6315 dw2 |= 0x10000000;
6316 } else {
6317		/* still load_ce_ram if this is the first time a preamble is presented,
6318		 * even though no context switch happens.
6319 */
6320 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
6321 dw2 |= 0x10000000;
6322 }
6323
6324 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6325 amdgpu_ring_write(ring, dw2);
6326 amdgpu_ring_write(ring, 0);
6327}
6328
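/*
 * Emit a COND_EXEC packet with a dummy DW count of 0 and return the ring
 * offset of that count, so the caller can patch in the real number of
 * DWs to skip once the conditional span is known.
 */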
6329static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
6330 uint64_t addr)
6331{
6332 unsigned ret;
6333
6334 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
6335 amdgpu_ring_write(ring, lower_32_bits(addr));
6336 amdgpu_ring_write(ring, upper_32_bits(addr));
6337 /* discard following DWs if *cond_exec_gpu_addr==0 */
6338 amdgpu_ring_write(ring, 0);
6339 ret = ring->wptr & ring->buf_mask;
6340 /* patch dummy value later */
6341 amdgpu_ring_write(ring, 0);
6342 return ret;
6343}
6344
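/*
 * KIQ register read: COPY_DATA moves the register value into the
 * write-back buffer at reg_val_offs (with write confirm), where the CPU
 * can then pick it up.
 */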
6345static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6346 uint32_t reg_val_offs)
6347{
6348 struct amdgpu_device *adev = ring->adev;
6349
6350 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6351	amdgpu_ring_write(ring, 0 |	/* src: register */
6352 (5 << 8) | /* dst: memory */
6353 (1 << 20)); /* write confirm */
6354 amdgpu_ring_write(ring, reg);
6355 amdgpu_ring_write(ring, 0);
6356 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6357 reg_val_offs * 4));
6358 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6359 reg_val_offs * 4));
6360}
6361
6362static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6363 uint32_t val)
6364{
6365 uint32_t cmd;
6366
6367 switch (ring->funcs->type) {
6368 case AMDGPU_RING_TYPE_GFX:
6369 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6370 break;
6371 case AMDGPU_RING_TYPE_KIQ:
6372 cmd = 1 << 16; /* no inc addr */
6373 break;
6374 default:
6375 cmd = WR_CONFIRM;
6376 break;
6377 }
6378
6379 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6380 amdgpu_ring_write(ring, cmd);
6381 amdgpu_ring_write(ring, reg);
6382 amdgpu_ring_write(ring, 0);
6383 amdgpu_ring_write(ring, val);
6384}
6385
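/*
 * Attempt soft recovery of a hung ring: issue SQ_CMD with CHECK_VMID set
 * so only waves of the offending VMID are targeted (CMD=0x03 is presumably
 * the wave-kill opcode, matching later-generation soft recovery paths).
 */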
6386static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
6387{
6388 struct amdgpu_device *adev = ring->adev;
6389 uint32_t value = 0;
6390
6391 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6392 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6393 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6394 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6395 WREG32(mmSQ_CMD, value);
6396}
6397
6398static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6399 enum amdgpu_interrupt_state state)
6400{
6401 WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
6402 state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6403}
6404
6405static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6406 int me, int pipe,
6407 enum amdgpu_interrupt_state state)
6408{
6409 u32 mec_int_cntl, mec_int_cntl_reg;
6410
6411 /*
6412 * amdgpu controls only the first MEC. That's why this function only
6413 * handles the setting of interrupts for this specific MEC. All other
6414 * pipes' interrupts are set by amdkfd.
6415 */
6416
6417 if (me == 1) {
6418 switch (pipe) {
6419 case 0:
6420 mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
6421 break;
6422 case 1:
6423 mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
6424 break;
6425 case 2:
6426 mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
6427 break;
6428 case 3:
6429 mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
6430 break;
6431 default:
6432 DRM_DEBUG("invalid pipe %d\n", pipe);
6433 return;
6434 }
6435 } else {
6436 DRM_DEBUG("invalid me %d\n", me);
6437 return;
6438 }
6439
6440 switch (state) {
6441 case AMDGPU_IRQ_STATE_DISABLE:
6442 mec_int_cntl = RREG32(mec_int_cntl_reg);
6443 mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6444 WREG32(mec_int_cntl_reg, mec_int_cntl);
6445 break;
6446 case AMDGPU_IRQ_STATE_ENABLE:
6447 mec_int_cntl = RREG32(mec_int_cntl_reg);
6448 mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6449 WREG32(mec_int_cntl_reg, mec_int_cntl);
6450 break;
6451 default:
6452 break;
6453 }
6454}
6455
6456static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6457 struct amdgpu_irq_src *source,
6458 unsigned type,
6459 enum amdgpu_interrupt_state state)
6460{
6461 WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
6462 state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6463
6464 return 0;
6465}
6466
6467static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6468 struct amdgpu_irq_src *source,
6469 unsigned type,
6470 enum amdgpu_interrupt_state state)
6471{
6472 WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
6473 state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6474
6475 return 0;
6476}
6477
6478static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6479 struct amdgpu_irq_src *src,
6480 unsigned type,
6481 enum amdgpu_interrupt_state state)
6482{
6483 switch (type) {
6484 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6485 gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
6486 break;
6487 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6488 gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6489 break;
6490 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6491 gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6492 break;
6493 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6494 gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6495 break;
6496 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6497 gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6498 break;
6499 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6500 gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6501 break;
6502 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6503 gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6504 break;
6505 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6506 gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6507 break;
6508 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6509 gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6510 break;
6511 default:
6512 break;
6513 }
6514 return 0;
6515}
6516
6517static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
6518 struct amdgpu_irq_src *source,
6519 unsigned int type,
6520 enum amdgpu_interrupt_state state)
6521{
6522 int enable_flag;
6523
6524 switch (state) {
6525 case AMDGPU_IRQ_STATE_DISABLE:
6526 enable_flag = 0;
6527 break;
6528
6529 case AMDGPU_IRQ_STATE_ENABLE:
6530 enable_flag = 1;
6531 break;
6532
6533 default:
6534 return -EINVAL;
6535 }
6536
6537 WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6538 WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6539 WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6540 WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6541 WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6542 WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6543 enable_flag);
6544 WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6545 enable_flag);
6546 WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6547 enable_flag);
6548 WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6549 enable_flag);
6550 WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6551 enable_flag);
6552 WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6553 enable_flag);
6554 WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6555 enable_flag);
6556 WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6557 enable_flag);
6558
6559 return 0;
6560}
6561
6562static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
6563 struct amdgpu_irq_src *source,
6564 unsigned int type,
6565 enum amdgpu_interrupt_state state)
6566{
6567 int enable_flag;
6568
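	/* Note: the STALL field is inverted w.r.t. the interrupt state;
	 * stalling SQ interrupt messages (STALL=1) disables them.
	 */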
6569 switch (state) {
6570 case AMDGPU_IRQ_STATE_DISABLE:
6571 enable_flag = 1;
6572 break;
6573
6574 case AMDGPU_IRQ_STATE_ENABLE:
6575 enable_flag = 0;
6576 break;
6577
6578 default:
6579 return -EINVAL;
6580 }
6581
6582 WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
6583 enable_flag);
6584
6585 return 0;
6586}
6587
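/*
 * CP EOP interrupt handler. The IV ring_id encodes the source queue:
 * bits [3:2] = ME, bits [1:0] = pipe, bits [6:4] = queue; e.g. ring_id
 * 0x15 decodes to me 1, pipe 1, queue 1. ME 0 is the gfx ring, ME 1/2
 * are the compute MECs.
 */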
6588static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
6589 struct amdgpu_irq_src *source,
6590 struct amdgpu_iv_entry *entry)
6591{
6592 int i;
6593 u8 me_id, pipe_id, queue_id;
6594 struct amdgpu_ring *ring;
6595
6596 DRM_DEBUG("IH: CP EOP\n");
6597 me_id = (entry->ring_id & 0x0c) >> 2;
6598 pipe_id = (entry->ring_id & 0x03) >> 0;
6599 queue_id = (entry->ring_id & 0x70) >> 4;
6600
6601 switch (me_id) {
6602 case 0:
6603 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6604 break;
6605 case 1:
6606 case 2:
6607 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6608 ring = &adev->gfx.compute_ring[i];
6609 /* Per-queue interrupt is supported for MEC starting from VI.
6610 * The interrupt can only be enabled/disabled per pipe instead of per queue.
6611 */
6612 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6613 amdgpu_fence_process(ring);
6614 }
6615 break;
6616 }
6617 return 0;
6618}
6619
6620static void gfx_v8_0_fault(struct amdgpu_device *adev,
6621 struct amdgpu_iv_entry *entry)
6622{
6623 u8 me_id, pipe_id, queue_id;
6624 struct amdgpu_ring *ring;
6625 int i;
6626
6627 me_id = (entry->ring_id & 0x0c) >> 2;
6628 pipe_id = (entry->ring_id & 0x03) >> 0;
6629 queue_id = (entry->ring_id & 0x70) >> 4;
6630
6631 switch (me_id) {
6632 case 0:
6633 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6634 break;
6635 case 1:
6636 case 2:
6637 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6638 ring = &adev->gfx.compute_ring[i];
6639 if (ring->me == me_id && ring->pipe == pipe_id &&
6640 ring->queue == queue_id)
6641 drm_sched_fault(&ring->sched);
6642 }
6643 break;
6644 }
6645}
6646
6647static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
6648 struct amdgpu_irq_src *source,
6649 struct amdgpu_iv_entry *entry)
6650{
6651 DRM_ERROR("Illegal register access in command stream\n");
6652 gfx_v8_0_fault(adev, entry);
6653 return 0;
6654}
6655
6656static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
6657 struct amdgpu_irq_src *source,
6658 struct amdgpu_iv_entry *entry)
6659{
6660 DRM_ERROR("Illegal instruction in command stream\n");
6661 gfx_v8_0_fault(adev, entry);
6662 return 0;
6663}
6664
6665static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
6666 struct amdgpu_irq_src *source,
6667 struct amdgpu_iv_entry *entry)
6668{
6669	DRM_ERROR("CP EDC/ECC error detected.\n");
6670 return 0;
6671}
6672
6673static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
6674 bool from_wq)
6675{
6676 u32 enc, se_id, sh_id, cu_id;
6677 char type[20];
6678 int sq_edc_source = -1;
6679
6680 enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6681 se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6682
6683 switch (enc) {
6684 case 0:
6685		DRM_INFO("SQ general purpose intr detected: "
6686			"se_id %d, immed_overflow %d, host_reg_overflow %d, "
6687			"host_cmd_overflow %d, cmd_timestamp %d, "
6688			"reg_timestamp %d, thread_trace_buff_full %d, "
6689			"wlt %d, thread_trace %d.\n",
6690 se_id,
6691 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6692 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6693 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6694 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6695 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6696 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6697 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6698 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6699 );
6700 break;
6701 case 1:
6702 case 2:
6703
6704 cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6705 sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6706
6707 /*
6708		 * This function can be called either directly from the ISR
6709		 * or from a work handler (BH); only in the latter case can
6710		 * we safely read the SQ_EDC_INFO register instance.
6711 */
6712 if (from_wq) {
6713 mutex_lock(&adev->grbm_idx_mutex);
6714 gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);
6715
6716 sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
6717
6718 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
6719 mutex_unlock(&adev->grbm_idx_mutex);
6720 }
6721
6722 if (enc == 1)
6723 sprintf(type, "instruction intr");
6724 else
6725 sprintf(type, "EDC/ECC error");
6726
6727 DRM_INFO(
6728 "SQ %s detected: "
6729 "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
6730 "trap %s, sq_ed_info.source %s.\n",
6731 type, se_id, sh_id, cu_id,
6732 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
6733 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
6734 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
6735 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
6736 (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
6737 );
6738 break;
6739 default:
6740		DRM_ERROR("SQ invalid encoding type.\n");
6741 }
6742}
6743
6744static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
6745{
6747 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
6748 struct sq_work *sq_work = container_of(work, struct sq_work, work);
6749
6750 gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
6751}
6752
6753static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6754 struct amdgpu_irq_src *source,
6755 struct amdgpu_iv_entry *entry)
6756{
6757 unsigned ih_data = entry->src_data[0];
6758
6759 /*
6760 * Try to submit work so SQ_EDC_INFO can be accessed from
6761	 * BH. If the previous work submission hasn't finished yet,
6762	 * just print whatever info is available directly from the ISR.
6763 */
6764 if (work_pending(&adev->gfx.sq_work.work)) {
6765 gfx_v8_0_parse_sq_irq(adev, ih_data, false);
6766 } else {
6767 adev->gfx.sq_work.ih_data = ih_data;
6768 schedule_work(&adev->gfx.sq_work.work);
6769 }
6770
6771 return 0;
6772}
6773
6774static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
6775{
6776 amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
6777 amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6778 PACKET3_TC_ACTION_ENA |
6779 PACKET3_SH_KCACHE_ACTION_ENA |
6780 PACKET3_SH_ICACHE_ACTION_ENA |
6781 PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
6782 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
6783 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6784 amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
6785}
6786
6787static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
6788{
6789 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6790 amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6791 PACKET3_TC_ACTION_ENA |
6792 PACKET3_SH_KCACHE_ACTION_ENA |
6793 PACKET3_SH_ICACHE_ACTION_ENA |
6794 PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
6795 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
6796 amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
6797 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6798 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
6799 amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
6800}
6801
6803/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
6804#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f
6805static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6806 uint32_t pipe, bool enable)
6807{
6808 uint32_t val;
6809 uint32_t wcl_cs_reg;
6810
6811 val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
6812
6813 switch (pipe) {
6814 case 0:
6815 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
6816 break;
6817 case 1:
6818 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
6819 break;
6820 case 2:
6821 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
6822 break;
6823 case 3:
6824 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
6825 break;
6826 default:
6827 DRM_DEBUG("invalid pipe %d\n", pipe);
6828 return;
6829 }
6830
6831 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6833}
6834
6835#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff
6836static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
6837{
6838 struct amdgpu_device *adev = ring->adev;
6839 uint32_t val;
6840 int i;
6841
6842	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
6843	 * the number of gfx waves. Writing 0x1f (the low 5 bits) ensures gfx only
6844	 * gets around 25% of GPU resources.
6845 */
6846 val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
6847 amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
6848
6849	/* Restrict waves for normal/low priority compute queues as well
6850	 * to get the best QoS for high priority compute jobs.
6851	 *
6852	 * amdgpu controls only the 1st ME (CS pipes 0-3).
6853 */
6854 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
6855 if (i != ring->pipe)
6856 gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
6858 }
6860}
6861
6862static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
6863 .name = "gfx_v8_0",
6864 .early_init = gfx_v8_0_early_init,
6865 .late_init = gfx_v8_0_late_init,
6866 .sw_init = gfx_v8_0_sw_init,
6867 .sw_fini = gfx_v8_0_sw_fini,
6868 .hw_init = gfx_v8_0_hw_init,
6869 .hw_fini = gfx_v8_0_hw_fini,
6870 .suspend = gfx_v8_0_suspend,
6871 .resume = gfx_v8_0_resume,
6872 .is_idle = gfx_v8_0_is_idle,
6873 .wait_for_idle = gfx_v8_0_wait_for_idle,
6874 .check_soft_reset = gfx_v8_0_check_soft_reset,
6875 .pre_soft_reset = gfx_v8_0_pre_soft_reset,
6876 .soft_reset = gfx_v8_0_soft_reset,
6877 .post_soft_reset = gfx_v8_0_post_soft_reset,
6878 .set_clockgating_state = gfx_v8_0_set_clockgating_state,
6879 .set_powergating_state = gfx_v8_0_set_powergating_state,
6880 .get_clockgating_state = gfx_v8_0_get_clockgating_state,
6881};
6882
6883static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6884 .type = AMDGPU_RING_TYPE_GFX,
6885 .align_mask = 0xff,
6886 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6887 .support_64bit_ptrs = false,
6888 .get_rptr = gfx_v8_0_ring_get_rptr,
6889 .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
6890 .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
6891	.emit_frame_size = /* maximum 215 DWs when counting 16 IBs in */
6892 5 + /* COND_EXEC */
6893 7 + /* PIPELINE_SYNC */
6894 VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
6895 12 + /* FENCE for VM_FLUSH */
6896 20 + /* GDS switch */
6897 4 + /* double SWITCH_BUFFER,
6898		     the first COND_EXEC jumps to the place just
6899 prior to this double SWITCH_BUFFER */
6900 5 + /* COND_EXEC */
6901 7 + /* HDP_flush */
6902 4 + /* VGT_flush */
6903 14 + /* CE_META */
6904 31 + /* DE_META */
6905 3 + /* CNTX_CTRL */
6906 5 + /* HDP_INVL */
6907 12 + 12 + /* FENCE x2 */
6908 2 + /* SWITCH_BUFFER */
6909 5, /* SURFACE_SYNC */
6910 .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
6911 .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
6912 .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
6913 .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6914 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6915 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6916 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6917 .test_ring = gfx_v8_0_ring_test_ring,
6918 .test_ib = gfx_v8_0_ring_test_ib,
6919 .insert_nop = amdgpu_ring_insert_nop,
6920 .pad_ib = amdgpu_ring_generic_pad_ib,
6921 .emit_switch_buffer = gfx_v8_ring_emit_sb,
6922 .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
6923 .init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
6924 .emit_wreg = gfx_v8_0_ring_emit_wreg,
6925 .soft_recovery = gfx_v8_0_ring_soft_recovery,
6926 .emit_mem_sync = gfx_v8_0_emit_mem_sync,
6927};
6928
6929static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6930 .type = AMDGPU_RING_TYPE_COMPUTE,
6931 .align_mask = 0xff,
6932 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6933 .support_64bit_ptrs = false,
6934 .get_rptr = gfx_v8_0_ring_get_rptr,
6935 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
6936 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
6937 .emit_frame_size =
6938 20 + /* gfx_v8_0_ring_emit_gds_switch */
6939 7 + /* gfx_v8_0_ring_emit_hdp_flush */
6940 5 + /* hdp_invalidate */
6941 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6942 VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
6943 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
6944 7 + /* gfx_v8_0_emit_mem_sync_compute */
6945 5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
6946 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
6947 .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
6948 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
6949 .emit_fence = gfx_v8_0_ring_emit_fence_compute,
6950 .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6951 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6952 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6953 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6954 .test_ring = gfx_v8_0_ring_test_ring,
6955 .test_ib = gfx_v8_0_ring_test_ib,
6956 .insert_nop = amdgpu_ring_insert_nop,
6957 .pad_ib = amdgpu_ring_generic_pad_ib,
6958 .emit_wreg = gfx_v8_0_ring_emit_wreg,
6959 .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
6960 .emit_wave_limit = gfx_v8_0_emit_wave_limit,
6961};
6962
6963static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
6964 .type = AMDGPU_RING_TYPE_KIQ,
6965 .align_mask = 0xff,
6966 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6967 .support_64bit_ptrs = false,
6968 .get_rptr = gfx_v8_0_ring_get_rptr,
6969 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
6970 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
6971 .emit_frame_size =
6972 20 + /* gfx_v8_0_ring_emit_gds_switch */
6973 7 + /* gfx_v8_0_ring_emit_hdp_flush */
6974 5 + /* hdp_invalidate */
6975 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6976 17 + /* gfx_v8_0_ring_emit_vm_flush */
6977 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6978 .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
6979 .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
6980 .test_ring = gfx_v8_0_ring_test_ring,
6981 .insert_nop = amdgpu_ring_insert_nop,
6982 .pad_ib = amdgpu_ring_generic_pad_ib,
6983 .emit_rreg = gfx_v8_0_ring_emit_rreg,
6984 .emit_wreg = gfx_v8_0_ring_emit_wreg,
6985};
6986
6987static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
6988{
6989 int i;
6990
6991 adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
6992
6993 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6994 adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
6995
6996 for (i = 0; i < adev->gfx.num_compute_rings; i++)
6997 adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
6998}
6999
7000static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
7001 .set = gfx_v8_0_set_eop_interrupt_state,
7002 .process = gfx_v8_0_eop_irq,
7003};
7004
7005static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
7006 .set = gfx_v8_0_set_priv_reg_fault_state,
7007 .process = gfx_v8_0_priv_reg_irq,
7008};
7009
7010static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
7011 .set = gfx_v8_0_set_priv_inst_fault_state,
7012 .process = gfx_v8_0_priv_inst_irq,
7013};
7014
7015static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
7016 .set = gfx_v8_0_set_cp_ecc_int_state,
7017 .process = gfx_v8_0_cp_ecc_error_irq,
7018};
7019
7020static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
7021 .set = gfx_v8_0_set_sq_int_state,
7022 .process = gfx_v8_0_sq_irq,
7023};
7024
7025static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
7026{
7027 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7028 adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
7029
7030 adev->gfx.priv_reg_irq.num_types = 1;
7031 adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
7032
7033 adev->gfx.priv_inst_irq.num_types = 1;
7034 adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
7035
7036 adev->gfx.cp_ecc_error_irq.num_types = 1;
7037 adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
7038
7039 adev->gfx.sq_irq.num_types = 1;
7040 adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
7041}
7042
7043static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
7044{
7045 adev->gfx.rlc.funcs = &iceland_rlc_funcs;
7046}
7047
7048static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
7049{
7050	/* init ASIC GDS info */
7051 adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
7052 adev->gds.gws_size = 64;
7053 adev->gds.oa_size = 16;
7054 adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
7055}
7056
7057static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7058 u32 bitmap)
7059{
7060 u32 data;
7061
7062 if (!bitmap)
7063 return;
7064
7065 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7066 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7067
7068 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
7069}
7070
7071static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7072{
7073 u32 data, mask;
7074
7075 data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
7076 RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
7077
7078 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7079
7080 return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
7081}
7082
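/*
 * Walk every shader engine / shader array, apply any user-requested CU
 * disable masks, and record the active-CU bitmap per SE/SH. The first
 * ao_cu_num active CUs in each array are marked always-on (AO): 2 on
 * APUs, the whole array on dGPUs.
 */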
7083static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
7084{
7085 int i, j, k, counter, active_cu_number = 0;
7086 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7087 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
7088 unsigned disable_masks[4 * 2];
7089 u32 ao_cu_num;
7090
7091 memset(cu_info, 0, sizeof(*cu_info));
7092
7093 if (adev->flags & AMD_IS_APU)
7094 ao_cu_num = 2;
7095 else
7096 ao_cu_num = adev->gfx.config.max_cu_per_sh;
7097
7098 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
7099
7100 mutex_lock(&adev->grbm_idx_mutex);
7101 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7102 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7103 mask = 1;
7104 ao_bitmap = 0;
7105 counter = 0;
7106 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
7107 if (i < 4 && j < 2)
7108 gfx_v8_0_set_user_cu_inactive_bitmap(
7109 adev, disable_masks[i * 2 + j]);
7110 bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
7111 cu_info->bitmap[0][i][j] = bitmap;
7112
7113			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7114 if (bitmap & mask) {
7115 if (counter < ao_cu_num)
7116 ao_bitmap |= mask;
7117					counter++;
7118 }
7119 mask <<= 1;
7120 }
7121 active_cu_number += counter;
7122 if (i < 2 && j < 2)
7123 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7124 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
7125 }
7126 }
7127 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7128 mutex_unlock(&adev->grbm_idx_mutex);
7129
7130 cu_info->number = active_cu_number;
7131 cu_info->ao_cu_mask = ao_cu_mask;
7132 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7133 cu_info->max_waves_per_simd = 10;
7134 cu_info->max_scratch_slots_per_cu = 32;
7135 cu_info->wave_front_size = 64;
7136 cu_info->lds_size = 64;
7137}
7138
7139const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
7140{
7141 .type = AMD_IP_BLOCK_TYPE_GFX,
7142 .major = 8,
7143 .minor = 0,
7144 .rev = 0,
7145 .funcs = &gfx_v8_0_ip_funcs,
7146};
7147
7148const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
7149{
7150 .type = AMD_IP_BLOCK_TYPE_GFX,
7151 .major = 8,
7152 .minor = 1,
7153 .rev = 0,
7154 .funcs = &gfx_v8_0_ip_funcs,
7155};
7156
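/*
 * Write the CE metadata payload into the per-context save area (CSA),
 * which appears to back SR-IOV mid-IB preemption; the payload layout
 * (and thus the packet size) depends on chained IB support.
 */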
7157static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
7158{
7159 uint64_t ce_payload_addr;
7160 int cnt_ce;
7161 union {
7162 struct vi_ce_ib_state regular;
7163 struct vi_ce_ib_state_chained_ib chained;
7164 } ce_payload = {};
7165
7166 if (ring->adev->virt.chained_ib_support) {
7167 ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7168 offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
7169 cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
7170 } else {
7171 ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7172 offsetof(struct vi_gfx_meta_data, ce_payload);
7173 cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
7174 }
7175
7176 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
7177 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
7178 WRITE_DATA_DST_SEL(8) |
7179 WR_CONFIRM) |
7180 WRITE_DATA_CACHE_POLICY(0));
7181 amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
7182 amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
7183 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
7184}
7185
7186static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
7187{
7188 uint64_t de_payload_addr, gds_addr, csa_addr;
7189 int cnt_de;
7190 union {
7191 struct vi_de_ib_state regular;
7192 struct vi_de_ib_state_chained_ib chained;
7193 } de_payload = {};
7194
7195 csa_addr = amdgpu_csa_vaddr(ring->adev);
7196 gds_addr = csa_addr + 4096;
7197 if (ring->adev->virt.chained_ib_support) {
7198 de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
7199 de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
7200 de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
7201 cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
7202 } else {
7203 de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
7204 de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
7205 de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
7206 cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
7207 }
7208
7209 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
7210 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
7211 WRITE_DATA_DST_SEL(8) |
7212 WR_CONFIRM) |
7213 WRITE_DATA_CACHE_POLICY(0));
7214 amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
7215 amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
7216 amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
7217}
450 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
451 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
452 mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
453 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
454 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
455 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
456 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
457};
458
459static const u32 iceland_mgcg_cgcg_init[] =
460{
461 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
462 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
463 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
464 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
465 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
466 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
467 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
468 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
469 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
470 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
471 mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
472 mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
473 mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
474 mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
475 mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
476 mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
477 mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
478 mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
479 mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
480 mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
481 mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
482 mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
483 mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
484 mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
485 mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
486 mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
487 mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
488 mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
489 mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
490 mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
491 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
492 mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
493 mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
494 mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
495 mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
496 mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
497 mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
498 mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
499 mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
500 mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
501 mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
502 mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
503 mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
504 mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
505 mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
506 mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
507 mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
508 mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
509 mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
510 mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
511 mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
512 mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
513 mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
514 mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
515 mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
516 mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
517 mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
518 mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
519 mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
520 mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
521 mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
522 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
523 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
524 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
525};
526
527static const u32 cz_golden_settings_a11[] =
528{
529 mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
530 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
531 mmGB_GPU_ID, 0x0000000f, 0x00000000,
532 mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
533 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
534 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
535 mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
536 mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
537 mmTCC_CTRL, 0x00100000, 0xf31fff7f,
538 mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
539 mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
540 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
541};
542
543static const u32 cz_golden_common_all[] =
544{
545 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
546 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
547 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
548 mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
549 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
550 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
551 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
552 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
553};
554
555static const u32 cz_mgcg_cgcg_init[] =
556{
557 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
558 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
559 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
560 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
561 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
562 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
563 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
564 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
565 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
566 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
567 mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
568 mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
569 mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
570 mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
571 mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
572 mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
573 mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
574 mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
575 mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
576 mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
577 mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
578 mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
579 mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
580 mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
581 mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
582 mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
583 mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
584 mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
585 mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
586 mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
587 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
588 mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
589 mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
590 mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
591 mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
592 mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
593 mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
594 mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
595 mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
596 mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
597 mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
598 mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
599 mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
600 mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
601 mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
602 mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
603 mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
604 mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
605 mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
606 mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
607 mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
608 mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
609 mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
610 mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
611 mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
612 mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
613 mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
614 mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
615 mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
616 mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
617 mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
618 mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
619 mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
620 mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
621 mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
622 mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
623 mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
624 mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
625 mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
626 mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
627 mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
628 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
629 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
630 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
631 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
632};
633
634static const u32 stoney_golden_settings_a11[] =
635{
636 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
637 mmGB_GPU_ID, 0x0000000f, 0x00000000,
638 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
639 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
640 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
641 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
642 mmTCC_CTRL, 0x00100000, 0xf31fff7f,
643 mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
644 mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
645 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
646};
647
648static const u32 stoney_golden_common_all[] =
649{
650 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
651 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
652 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
653 mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
654 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
655 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
656 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
657 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
658};
659
660static const u32 stoney_mgcg_cgcg_init[] =
661{
662 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
663 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
664 mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
665 mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
666 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
667};
668
669static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
670static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
671static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
672static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
673static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
674static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
675static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
676static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
677
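/*
 * The "golden" tables above are flat lists of (register offset, AND mask,
 * OR value) triplets; amdgpu_device_program_register_sequence() applies
 * each one as a read-modify-write update during early init.
 */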
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
				iceland_mgcg_cgcg_init,
				ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
				golden_settings_iceland_a11,
				ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
				iceland_golden_common_all,
				ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
				fiji_mgcg_cgcg_init,
				ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
				golden_settings_fiji_a10,
				ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
				fiji_golden_common_all,
				ARRAY_SIZE(fiji_golden_common_all));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
				tonga_mgcg_cgcg_init,
				ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
				golden_settings_tonga_a11,
				ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
				tonga_golden_common_all,
				ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
				golden_settings_polaris11_a11,
				ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
				polaris11_golden_common_all,
				ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
				golden_settings_polaris10_a11,
				ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
				polaris10_golden_common_all,
				ARRAY_SIZE(polaris10_golden_common_all));
		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
		if (adev->pdev->revision == 0xc7 &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
				cz_mgcg_cgcg_init,
				ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
				cz_golden_settings_a11,
				ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
				cz_golden_common_all,
				ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
				stoney_mgcg_cgcg_init,
				ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
				stoney_golden_settings_a11,
				ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
				stoney_golden_common_all,
				ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}

static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

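/*
 * Basic ring sanity test: seed a scratch register with 0xCAFEDEAD, ask the
 * CP to overwrite it with 0xDEADBEEF via a SET_UCONFIG_REG packet, then
 * poll until the write lands or adev->usec_timeout expires.
 */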
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

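/*
 * Same scratch-register handshake as above, but driven from an indirect
 * buffer so that IB submission, scheduling and fence signalling are
 * exercised as well.
 */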
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ))
		release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

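/*
 * Fetch and validate the PFP, ME, CE, RLC and MEC(2) microcode images.
 * Polaris parts first try the updated "_2.bin" images and fall back to the
 * original names; Stoney and Topaz carry no MEC2 firmware at all.
 */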
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default:
		BUG();
	}

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
			err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
			err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
			err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	/*
	 * Support for MCBP/Virtualization in combination with chained IBs was
	 * formally released with feature version 46.
	 */
	if (adev->gfx.ce_feature_version >= 46 &&
	    adev->gfx.pfp_feature_version >= 46) {
		adev->virt.chained_ib_support = true;
		DRM_INFO("Chained IB support enabled!\n");
	} else
		adev->virt.chained_ib_support = false;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);

	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);

	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
			err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ)) {
		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
			if (err == -ENOENT) {
				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
				err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
			}
		} else {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		}
		if (!err) {
			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
			if (err)
				goto out;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
			adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
			adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
		} else {
			err = 0;
			adev->gfx.mec2_fw = NULL;
		}
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		/* the MEC jump table (JT) must be accounted for as well */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);

		if (amdgpu_sriov_vf(adev)) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
			info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
			info->fw = adev->gfx.mec_fw;
			adev->firmware.fw_size +=
				ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
		}

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx8: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

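/*
 * Build the clear-state indirect buffer: a PM4 stream bracketed by
 * PREAMBLE begin/end markers that loads the SECT_CONTEXT register extents
 * and the raster config, then issues CLEAR_STATE.
 */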
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

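/*
 * Copy the jump tables of the CP microcode engines (CE, PFP, ME, MEC and,
 * on Carrizo, MEC2) back to back into the RLC cp_table BO.
 */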
static void cz_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	if (adev->asic_type == CHIP_CARRIZO)
		max_me = 5;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}

static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = vi_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);

		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC clear state bo failed\n", r);
			gfx_v8_0_rlc_fini(adev);
			return r;
		}

		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
			return r;
		}

		cz_init_cp_jump_table(adev);

		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	return 0;
}

static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}

static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);

	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		return r;
	}

	memset(hpd, 0, mec_hpd_size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}

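/*
 * Hand-encoded GCN compute shaders used by the EDC workaround below: the
 * first initializes the vector register file (VGPRs), the second the
 * scalar register file (SGPRs); both end with s_barrier/s_endpgm.
 */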
static const u32 vgpr_init_compute_shader[] =
{
	0x7e000209, 0x7e020208,
	0x7e040207, 0x7e060206,
	0x7e080205, 0x7e0a0204,
	0x7e0c0203, 0x7e0e0202,
	0x7e100201, 0x7e120200,
	0x7e140209, 0x7e160208,
	0x7e180207, 0x7e1a0206,
	0x7e1c0205, 0x7e1e0204,
	0x7e200203, 0x7e220202,
	0x7e240201, 0x7e260200,
	0x7e280209, 0x7e2a0208,
	0x7e2c0207, 0x7e2e0206,
	0x7e300205, 0x7e320204,
	0x7e340203, 0x7e360202,
	0x7e380201, 0x7e3a0200,
	0x7e3c0209, 0x7e3e0208,
	0x7e400207, 0x7e420206,
	0x7e440205, 0x7e460204,
	0x7e480203, 0x7e4a0202,
	0x7e4c0201, 0x7e4e0200,
	0x7e500209, 0x7e520208,
	0x7e540207, 0x7e560206,
	0x7e580205, 0x7e5a0204,
	0x7e5c0203, 0x7e5e0202,
	0x7e600201, 0x7e620200,
	0x7e640209, 0x7e660208,
	0x7e680207, 0x7e6a0206,
	0x7e6c0205, 0x7e6e0204,
	0x7e700203, 0x7e720202,
	0x7e740201, 0x7e760200,
	0x7e780209, 0x7e7a0208,
	0x7e7c0207, 0x7e7e0206,
	0xbf8a0000, 0xbf810000,
};

static const u32 sgpr_init_compute_shader[] =
{
	0xbe8a0100, 0xbe8c0102,
	0xbe8e0104, 0xbe900106,
	0xbe920108, 0xbe940100,
	0xbe960102, 0xbe980104,
	0xbe9a0106, 0xbe9c0108,
	0xbe9e0100, 0xbea00102,
	0xbea20104, 0xbea40106,
	0xbea60108, 0xbea80100,
	0xbeaa0102, 0xbeac0104,
	0xbeae0106, 0xbeb00108,
	0xbeb20100, 0xbeb40102,
	0xbeb60104, 0xbeb80106,
	0xbeba0108, 0xbebc0100,
	0xbebe0102, 0xbec00104,
	0xbec20106, 0xbec40108,
	0xbec60100, 0xbec80102,
	0xbee60004, 0xbee70005,
	0xbeea0006, 0xbeeb0007,
	0xbee80008, 0xbee90009,
	0xbefc0000, 0xbf8a0000,
	0xbf810000, 0x00000000,
};

static const u32 vgpr_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*4,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr1_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr2_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sec_ded_counter_registers[] =
{
	mmCPC_EDC_ATC_CNT,
	mmCPC_EDC_SCRATCH_CNT,
	mmCPC_EDC_UCODE_CNT,
	mmCPF_EDC_ATC_CNT,
	mmCPF_EDC_ROQ_CNT,
	mmCPF_EDC_TAG_CNT,
	mmCPG_EDC_ATC_CNT,
	mmCPG_EDC_DMA_CNT,
	mmCPG_EDC_TAG_CNT,
	mmDC_EDC_CSINVOC_CNT,
	mmDC_EDC_RESTORE_CNT,
	mmDC_EDC_STATE_CNT,
	mmGDS_EDC_CNT,
	mmGDS_EDC_GRBM_CNT,
	mmGDS_EDC_OA_DED,
	mmSPI_EDC_CNT,
	mmSQC_ATC_EDC_GATCL1_CNT,
	mmSQC_EDC_CNT,
	mmSQ_EDC_DED_CNT,
	mmSQ_EDC_INFO,
	mmSQ_EDC_SEC_CNT,
	mmTCC_EDC_CNT,
	mmTCP_ATC_EDC_GATCL1_CNT,
	mmTCP_EDC_CNT,
	mmTD_EDC_CNT
};

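/*
 * Carrizo EDC workaround: dispatch the GPR-init shaders above so every
 * VGPR and SGPR holds a known value, then enable the DED/FED modes in
 * GB_EDC_MODE and read the SEC/DED counters back to clear them.
 */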
static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r, i;
	u32 tmp;
	unsigned total_size, vgpr_offset, sgpr_offset;
	u64 gpu_addr;

	/* only supported on CZ */
	if (adev->asic_type != CHIP_CARRIZO)
		return 0;

	/* bail if the compute ring is not ready */
	if (!ring->ready)
		return 0;

	tmp = RREG32(mmGB_EDC_MODE);
	WREG32(mmGB_EDC_MODE, 0);

	total_size =
		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size = ALIGN(total_size, 256);
	vgpr_offset = total_size;
	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
	sgpr_offset = total_size;
	total_size += sizeof(sgpr_init_compute_shader);

	/* allocate an indirect buffer to put the commands in */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, total_size, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	/* load the compute shaders */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];

	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

	/* init the ib length to 0 */
	ib.length_dw = 0;

	/* VGPR */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR1 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR2 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
	/* schedule the ib on the ring */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r) {
		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
		goto fail;
	}

	/* wait for the GPU to finish processing the IB */
	r = dma_fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto fail;
	}

	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
	WREG32(mmGB_EDC_MODE, tmp);

	tmp = RREG32(mmCC_GC_EDC_CONFIG);
	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
	WREG32(mmCC_GC_EDC_CONFIG, tmp);

	/* read back registers to clear the counters */
	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
		RREG32(sec_ded_counter_registers[i]);

fail:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);

	return r;
}

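/*
 * Fill in the per-ASIC gfx.config limits (shader engines, tile pipes, CUs,
 * backends, ...) and derive gb_addr_config, including the DRAM row-size
 * fixup taken from the memory controller configuration.
 */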
static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;
	int ret;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS10:
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_STONEY:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_cu_per_sh = 3;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM is installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If the DIMM address map is 8GB, the row size is 2KB; otherwise 1KB. */
		/* If row size(DIMM1) != row size(DIMM0), use the larger of the two. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
1888 } else {
1889 tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1890 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1891 if (adev->gfx.config.mem_row_size_in_kb > 4)
1892 adev->gfx.config.mem_row_size_in_kb = 4;
1893 }
1894
1895 adev->gfx.config.shader_engine_tile_size = 32;
1896 adev->gfx.config.num_gpus = 1;
1897 adev->gfx.config.multi_gpu_tile_size = 64;
1898
1899	/* fix up row size: GB_ADDR_CONFIG.ROW_SIZE encodes 1KB/2KB/4KB as 0/1/2 */
1900 switch (adev->gfx.config.mem_row_size_in_kb) {
1901 case 1:
1902 default:
1903 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1904 break;
1905 case 2:
1906 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1907 break;
1908 case 4:
1909 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1910 break;
1911 }
1912 adev->gfx.config.gb_addr_config = gb_addr_config;
1913
1914 return 0;
1915}
1916
1917static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1918 int mec, int pipe, int queue)
1919{
1920 int r;
1921 unsigned irq_type;
1922	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1925
1926 /* mec0 is me1 */
1927 ring->me = mec + 1;
1928 ring->pipe = pipe;
1929 ring->queue = queue;
1930
1931 ring->ring_obj = NULL;
1932 ring->use_doorbell = true;
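	/* Compute rings take consecutive doorbells starting at MEC_RING0. */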
1933 ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
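	/* Each ring owns one GFX8_MEC_HPD_SIZE slice of the MEC EOP buffer. */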
1934 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1935 + (ring_id * GFX8_MEC_HPD_SIZE);
1936 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1937
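	/* EOP interrupt sources are numbered per (ME, pipe) pair. */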
1938 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1939 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1940 + ring->pipe;
1941
1942 /* type-2 packets are deprecated on MEC, use type-3 instead */
1943 r = amdgpu_ring_init(adev, ring, 1024,
1944 &adev->gfx.eop_irq, irq_type);
1945 if (r)
1946 return r;
1947
1949 return 0;
1950}
1951
1952static int gfx_v8_0_sw_init(void *handle)
1953{
1954 int i, j, k, r, ring_id;
1955 struct amdgpu_ring *ring;
1956 struct amdgpu_kiq *kiq;
1957 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1958
1959 switch (adev->asic_type) {
1960 case CHIP_FIJI:
1961 case CHIP_TONGA:
1962 case CHIP_POLARIS11:
1963 case CHIP_POLARIS12:
1964 case CHIP_POLARIS10:
1965 case CHIP_CARRIZO:
1966 adev->gfx.mec.num_mec = 2;
1967 break;
1968 case CHIP_TOPAZ:
1969 case CHIP_STONEY:
1970 default:
1971 adev->gfx.mec.num_mec = 1;
1972 break;
1973 }
1974
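	/* All VI parts expose 4 pipes per MEC and 8 queues per pipe. */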
1975 adev->gfx.mec.num_pipe_per_mec = 4;
1976 adev->gfx.mec.num_queue_per_pipe = 8;
1977
1978 /* KIQ event */
1979 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
1980 if (r)
1981 return r;
1982
1983	/* EOP event */
1984 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
1985 if (r)
1986 return r;
1987
1988 /* Privileged reg */
1989 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
1990 &adev->gfx.priv_reg_irq);
1991 if (r)
1992 return r;
1993
1994 /* Privileged inst */
1995 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
1996 &adev->gfx.priv_inst_irq);
1997 if (r)
1998 return r;
1999
2000 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2001
2002 gfx_v8_0_scratch_init(adev);
2003
2004 r = gfx_v8_0_init_microcode(adev);
2005 if (r) {
2006 DRM_ERROR("Failed to load gfx firmware!\n");
2007 return r;
2008 }
2009
2010 r = gfx_v8_0_rlc_init(adev);
2011 if (r) {
2012 DRM_ERROR("Failed to init rlc BOs!\n");
2013 return r;
2014 }
2015
2016 r = gfx_v8_0_mec_init(adev);
2017 if (r) {
2018 DRM_ERROR("Failed to init MEC BOs!\n");
2019 return r;
2020 }
2021
2022 /* set up the gfx ring */
2023 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2024 ring = &adev->gfx.gfx_ring[i];
2025 ring->ring_obj = NULL;
2026 sprintf(ring->name, "gfx");
2027		/* no gfx doorbells on Iceland (Topaz) */
2028 if (adev->asic_type != CHIP_TOPAZ) {
2029 ring->use_doorbell = true;
2030 ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
2031 }
2032
2033 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2034 AMDGPU_CP_IRQ_GFX_EOP);
2035 if (r)
2036 return r;
2037 }
2038
2040 /* set up the compute queues - allocate horizontally across pipes */
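	/* The inner loop walks pipes, so consecutive ring IDs land on
	 * different pipes instead of filling one pipe's queues first. */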
2041 ring_id = 0;
2042 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2043 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2044 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2045 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2046 continue;
2047
2048 r = gfx_v8_0_compute_ring_init(adev,
2049 ring_id,
2050 i, k, j);
2051 if (r)
2052 return r;
2053
2054 ring_id++;
2055 }
2056 }
2057 }
2058
2059 r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE);
2060 if (r) {
2061 DRM_ERROR("Failed to init KIQ BOs!\n");
2062 return r;
2063 }
2064
2065 kiq = &adev->gfx.kiq;
2066 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2067 if (r)
2068 return r;
2069
2070	/* create MQDs for all compute queues, as well as the KIQ for the SRIOV case */
2071 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
2072 if (r)
2073 return r;
2074
2075	/* reserve GDS, GWS and OA resources for gfx */
2076 r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
2077 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
2078 &adev->gds.gds_gfx_bo, NULL, NULL);
2079 if (r)
2080 return r;
2081
2082 r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
2083 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
2084 &adev->gds.gws_gfx_bo, NULL, NULL);
2085 if (r)
2086 return r;
2087
2088 r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
2089 PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
2090 &adev->gds.oa_gfx_bo, NULL, NULL);
2091 if (r)
2092 return r;
2093
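	/* 0x8000 bytes = 32KB of constant engine RAM. */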
2094 adev->gfx.ce_ram_size = 0x8000;
2095
2096 r = gfx_v8_0_gpu_early_init(adev);
2097 if (r)
2098 return r;
2099
2100 return 0;
2101}
2102
2103static int gfx_v8_0_sw_fini(void *handle)
2104{
2105 int i;
2106 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107
2108 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
2109 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
2110 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
2111
2112 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2113 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2114 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2115 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2116
2117 amdgpu_gfx_compute_mqd_sw_fini(adev);
2118 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
2119 amdgpu_gfx_kiq_fini(adev);
2120
2121 gfx_v8_0_mec_fini(adev);
2122 gfx_v8_0_rlc_fini(adev);
2123 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2124 &adev->gfx.rlc.clear_state_gpu_addr,
2125 (void **)&adev->gfx.rlc.cs_ptr);
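	/* The RLC CP jump table only exists on Carrizo and Stoney, so only
	 * free it on those parts. */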
2126 if ((adev->asic_type == CHIP_CARRIZO) ||
2127 (adev->asic_type == CHIP_STONEY)) {
2128 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2129 &adev->gfx.rlc.cp_table_gpu_addr,
2130 (void **)&adev->gfx.rlc.cp_table_ptr);
2131 }
2132 gfx_v8_0_free_microcode(adev);
2133
2134 return 0;
2135}
2136
2137static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2138{
2139 uint32_t *modearray, *mod2array;
2140 const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2141 const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2142 u32 reg_offset;
2143
2144 modearray = adev->gfx.config.tile_mode_array;
2145 mod2array = adev->gfx.config.macrotile_mode_array;
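	/* modearray and mod2array shadow the GB_TILE_MODE* and
	 * GB_MACROTILE_MODE* register banks; entries left unassigned below
	 * stay zero. */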
2146
2147 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2148 modearray[reg_offset] = 0;
2149
2150 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2151 mod2array[reg_offset] = 0;
2152
2153 switch (adev->asic_type) {
2154 case CHIP_TOPAZ:
2155 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2156 PIPE_CONFIG(ADDR_SURF_P2) |
2157 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2158 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2159 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2160 PIPE_CONFIG(ADDR_SURF_P2) |
2161 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2162 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2163 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2164 PIPE_CONFIG(ADDR_SURF_P2) |
2165 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2166 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2167 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2168 PIPE_CONFIG(ADDR_SURF_P2) |
2169 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2170 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2171 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2172 PIPE_CONFIG(ADDR_SURF_P2) |
2173 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2174 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2175 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2176 PIPE_CONFIG(ADDR_SURF_P2) |
2177 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2178 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2179 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2180 PIPE_CONFIG(ADDR_SURF_P2) |
2181 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2182 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2183 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2184 PIPE_CONFIG(ADDR_SURF_P2));
2185 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2186 PIPE_CONFIG(ADDR_SURF_P2) |
2187 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2188 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2189 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2190 PIPE_CONFIG(ADDR_SURF_P2) |
2191 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2192 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2193 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2194 PIPE_CONFIG(ADDR_SURF_P2) |
2195 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2196 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2197 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2198 PIPE_CONFIG(ADDR_SURF_P2) |
2199 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2200 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2201 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2202 PIPE_CONFIG(ADDR_SURF_P2) |
2203 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2204 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2205 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2206 PIPE_CONFIG(ADDR_SURF_P2) |
2207 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2208 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2209 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2210 PIPE_CONFIG(ADDR_SURF_P2) |
2211 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2212 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2213 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2214 PIPE_CONFIG(ADDR_SURF_P2) |
2215 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2216 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2217 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2218 PIPE_CONFIG(ADDR_SURF_P2) |
2219 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2220 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2221 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2222 PIPE_CONFIG(ADDR_SURF_P2) |
2223 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2224 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2225 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2226 PIPE_CONFIG(ADDR_SURF_P2) |
2227 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2228 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2229 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2230 PIPE_CONFIG(ADDR_SURF_P2) |
2231 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2232 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2233 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2234 PIPE_CONFIG(ADDR_SURF_P2) |
2235 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2236 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2237 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2238 PIPE_CONFIG(ADDR_SURF_P2) |
2239 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2240 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2241 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2242 PIPE_CONFIG(ADDR_SURF_P2) |
2243 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2244 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2245 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2246 PIPE_CONFIG(ADDR_SURF_P2) |
2247 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2248 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2249 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2250 PIPE_CONFIG(ADDR_SURF_P2) |
2251 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2252 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2253 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2254 PIPE_CONFIG(ADDR_SURF_P2) |
2255 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2256 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2257
2258 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2259 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2260 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2261 NUM_BANKS(ADDR_SURF_8_BANK));
2262 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2263 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2264 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2265 NUM_BANKS(ADDR_SURF_8_BANK));
2266 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2267 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2268 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2269 NUM_BANKS(ADDR_SURF_8_BANK));
2270 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2271 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2272 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2273 NUM_BANKS(ADDR_SURF_8_BANK));
2274 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2275 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2276 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2277 NUM_BANKS(ADDR_SURF_8_BANK));
2278 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2279 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2280 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2281 NUM_BANKS(ADDR_SURF_8_BANK));
2282 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2283 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2284 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2285 NUM_BANKS(ADDR_SURF_8_BANK));
2286 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2287 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2288 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2289 NUM_BANKS(ADDR_SURF_16_BANK));
2290 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2291 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2292 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2293 NUM_BANKS(ADDR_SURF_16_BANK));
2294 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2295 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2296 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2297 NUM_BANKS(ADDR_SURF_16_BANK));
2298 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2299 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2300 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2301 NUM_BANKS(ADDR_SURF_16_BANK));
2302 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2303 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2304 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2305 NUM_BANKS(ADDR_SURF_16_BANK));
2306 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2307 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2308 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2309 NUM_BANKS(ADDR_SURF_16_BANK));
2310 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2311 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2312 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2313 NUM_BANKS(ADDR_SURF_8_BANK));
2314
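		/* Entries 7, 12, 17 and 23 were never assigned above, so skip the
		 * writes and leave those registers at their defaults; macrotile
		 * entry 7 is skipped for the same reason. */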
2315 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2316 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2317 reg_offset != 23)
2318 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2319
2320 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2321 if (reg_offset != 7)
2322 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2323
2324 break;
2325 case CHIP_FIJI:
2326 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2327 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2328 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2329 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2330 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2331 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2332 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2333 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2334 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2335 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2336 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2337 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2338 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2339 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2340 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2341 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2342 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2343 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2344 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2345 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2346 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2347 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2348 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2349 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2350 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2351 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2352 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2353 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2354 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2355 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2356 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2357 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2358 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2359 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2360 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2361 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2362 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2363 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2364 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2365 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2366 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2367 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2368 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2369 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2370 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2371 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2372 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2373 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2374 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2375 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2376 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2377 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2378 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2379 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2380 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2381 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2382 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2383 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2384 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2385 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2386 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2387 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2388 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2389 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2390 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2391 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2392 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2393 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2394 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2395 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2396 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2397 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2398 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2399 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2400 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2401 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2402 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2403 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2404 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2405 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2406 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2407 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2408 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2409 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2410 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2411 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2412 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2413 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2414 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2415 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2416 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2417 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2418 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2419 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2420 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2421 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2422 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2423 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2424 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2425 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2426 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2427 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2428 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2429 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2430 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2431 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2432 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2433 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2434 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2435 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2436 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2437 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2438 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2439 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2440 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2441 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2442 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2443 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2444 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2445 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2446 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2447 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2448
2449 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2450 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2451 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2452 NUM_BANKS(ADDR_SURF_8_BANK));
2453 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2454 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2455 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2456 NUM_BANKS(ADDR_SURF_8_BANK));
2457 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2458 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2459 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2460 NUM_BANKS(ADDR_SURF_8_BANK));
2461 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2462 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2463 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2464 NUM_BANKS(ADDR_SURF_8_BANK));
2465 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2466 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2467 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2468 NUM_BANKS(ADDR_SURF_8_BANK));
2469 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2470 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2471 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2472 NUM_BANKS(ADDR_SURF_8_BANK));
2473 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2474 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2475 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2476 NUM_BANKS(ADDR_SURF_8_BANK));
2477 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2478 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2479 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2480 NUM_BANKS(ADDR_SURF_8_BANK));
2481 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2482 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2483 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2484 NUM_BANKS(ADDR_SURF_8_BANK));
2485 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2486 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2487 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2488 NUM_BANKS(ADDR_SURF_8_BANK));
2489 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2490 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2491 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2492 NUM_BANKS(ADDR_SURF_8_BANK));
2493 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2494 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2495 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2496 NUM_BANKS(ADDR_SURF_8_BANK));
2497 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2498 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2499 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2500 NUM_BANKS(ADDR_SURF_8_BANK));
2501 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2502 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2503 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2504 NUM_BANKS(ADDR_SURF_4_BANK));
2505
2506 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2507 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2508
2509 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2510 if (reg_offset != 7)
2511 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2512
2513 break;
2514 case CHIP_TONGA:
2515 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2516 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2517 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2518 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2519 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2520 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2521 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2522 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2523 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2524 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2525 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2526 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2527 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2528 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2529 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2530 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2531 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2532 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2533 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2534 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2535 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2536 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2537 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2538 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2539 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2540 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2541 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2542 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2543 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2544 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2545 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2546 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2547 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2548 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2549 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2550 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2551 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2552 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2553 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2554 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2555 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2556 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2557 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2558 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2559 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2560 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2561 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2562 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2563 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2564 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2565 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2566 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2567 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2568 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2569 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2570 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2571 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2572 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2573 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2574 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2575 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2576 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2577 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2578 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2579 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2580 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2581 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2582 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2583 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2584 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2585 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2586 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2587 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2588 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2589 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2590 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2591 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2592 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2593 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2594 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2595 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2596 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2597 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2598 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2599 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2600 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2601 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2602 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2603 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2604 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2605 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2606 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2607 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2608 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2609 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2610 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2611 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2612 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2613 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2614 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2615 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2616 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2617 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2618 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2619 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2620 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2621 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2622 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2623 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2624 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2625 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2626 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2627 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2628 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2629 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2630 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2631 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2632 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2633 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2634 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2635 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2636 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2637
2638 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2639 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2640 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2641 NUM_BANKS(ADDR_SURF_16_BANK));
2642 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2643 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2644 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2645 NUM_BANKS(ADDR_SURF_16_BANK));
2646 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2647 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2648 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2649 NUM_BANKS(ADDR_SURF_16_BANK));
2650 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2651 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2652 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2653 NUM_BANKS(ADDR_SURF_16_BANK));
2654 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2655 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2656 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2657 NUM_BANKS(ADDR_SURF_16_BANK));
2658 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2659 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2660 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2661 NUM_BANKS(ADDR_SURF_16_BANK));
2662 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2663 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2664 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2665 NUM_BANKS(ADDR_SURF_16_BANK));
2666 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2667 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2668 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2669 NUM_BANKS(ADDR_SURF_16_BANK));
2670 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2671 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2672 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2673 NUM_BANKS(ADDR_SURF_16_BANK));
2674 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2675 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2676 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2677 NUM_BANKS(ADDR_SURF_16_BANK));
2678 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2679 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2680 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2681 NUM_BANKS(ADDR_SURF_16_BANK));
2682 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2683 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2684 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2685 NUM_BANKS(ADDR_SURF_8_BANK));
2686 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2687 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2688 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2689 NUM_BANKS(ADDR_SURF_4_BANK));
2690 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2691 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2692 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2693 NUM_BANKS(ADDR_SURF_4_BANK));
2694
2695 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2696 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2697
2698 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2699 if (reg_offset != 7)
2700 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2701
2702 break;
2703 case CHIP_POLARIS11:
2704 case CHIP_POLARIS12:
2705 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2706 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2707 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2708 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2709 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2710 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2711 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2712 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2713 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2714 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2715 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2716 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2717 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2718 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2719 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2720 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2721 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2722 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2723 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2724 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2725 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2726 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2727 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2728 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2729 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2730 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2731 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2732 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2733 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2734 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2735 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2736 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2737 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2738 PIPE_CONFIG(ADDR_SURF_P4_16x16));
2739 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2740 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2741 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2742 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2743 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2744 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2745 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2746 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2747 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2748 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2749 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2750 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2751 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2752 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2753 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2754 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2755 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2756 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2757 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2758 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2759 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2760 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2761 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2762 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2763 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2764 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2765 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2766 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2767 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2768 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2769 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2770 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2771 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2772 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2773 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2774 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2775 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2776 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2777 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2778 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2779 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2780 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2781 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2782 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2783 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2784 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2785 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2786 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2787 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2788 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2789 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2790 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2791 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2792 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2793 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2794 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2795 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2796 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2797 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2798 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2799 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2800 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2801 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2802 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2803 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2804 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2805 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2806 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2807 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2808 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2809 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2810 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2811 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2812 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2813 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2814 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2815 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2816 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2817 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2818 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2819 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2820 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2821 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2822 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2823 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2824 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2825 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2826 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2827
2828 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2829 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2830 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2831 NUM_BANKS(ADDR_SURF_16_BANK));
2832
2833 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2834 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2835 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2836 NUM_BANKS(ADDR_SURF_16_BANK));
2837
2838 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2839 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2840 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2841 NUM_BANKS(ADDR_SURF_16_BANK));
2842
2843 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2844 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2845 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2846 NUM_BANKS(ADDR_SURF_16_BANK));
2847
2848 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2849 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2850 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2851 NUM_BANKS(ADDR_SURF_16_BANK));
2852
2853 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2854 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2855 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2856 NUM_BANKS(ADDR_SURF_16_BANK));
2857
2858 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2859 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2860 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2861 NUM_BANKS(ADDR_SURF_16_BANK));
2862
2863 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2864 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2865 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2866 NUM_BANKS(ADDR_SURF_16_BANK));
2867
2868 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2869 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2870 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2871 NUM_BANKS(ADDR_SURF_16_BANK));
2872
2873 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2874 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2875 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2876 NUM_BANKS(ADDR_SURF_16_BANK));
2877
2878 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2879 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2880 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2881 NUM_BANKS(ADDR_SURF_16_BANK));
2882
2883 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2884 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2885 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2886 NUM_BANKS(ADDR_SURF_16_BANK));
2887
2888 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2889 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2890 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2891 NUM_BANKS(ADDR_SURF_8_BANK));
2892
2893 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2894 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2895 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2896 NUM_BANKS(ADDR_SURF_4_BANK));
2897
2898 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2899 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2900
2901 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2902 if (reg_offset != 7)
2903 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2904
2905 break;
2906 case CHIP_POLARIS10:
2907 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2908 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2909 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2910 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2911 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2912 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2913 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2914 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2915 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2916 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2917 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2918 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2919 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2920 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2921 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2922 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2923 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2924 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2925 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2926 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2927 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2928 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2929 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2930 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2931 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2932 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2933 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2934 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2935 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2936 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2937 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2938 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2939 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2940 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2941 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2942 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2943 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2944 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2945 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2946 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2947 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2948 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2949 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2950 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2951 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2952 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2953 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2954 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2955 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2956 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2957 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2958 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2959 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2960 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2961 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2962 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2963 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2964 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2965 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2966 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2967 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2968 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2969 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2970 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2971 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2972 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2973 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2974 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2975 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2976 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2977 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2978 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2979 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2980 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2981 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2982 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2983 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2984 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2985 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2986 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2987 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2988 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2989 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2990 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2991 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2992 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2993 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2994 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2995 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2996 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2997 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2998 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2999 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3000 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3001 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3002 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3003 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3004 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3005 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3006 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3007 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3008 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3009 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3010 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3011 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3012 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3013 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3014 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3015 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3016 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3017 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3018 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3019 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3020 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3021 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3022 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3023 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3024 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3025 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3026 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
3027 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3028 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3029
3030 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3031 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3032 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3033 NUM_BANKS(ADDR_SURF_16_BANK));
3034
3035 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3036 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3037 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3038 NUM_BANKS(ADDR_SURF_16_BANK));
3039
3040 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3041 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3042 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3043 NUM_BANKS(ADDR_SURF_16_BANK));
3044
3045 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3046 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3047 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3048 NUM_BANKS(ADDR_SURF_16_BANK));
3049
3050 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3051 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3052 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3053 NUM_BANKS(ADDR_SURF_16_BANK));
3054
3055 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3056 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3057 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3058 NUM_BANKS(ADDR_SURF_16_BANK));
3059
3060 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3061 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3062 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3063 NUM_BANKS(ADDR_SURF_16_BANK));
3064
3065 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3066 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3067 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3068 NUM_BANKS(ADDR_SURF_16_BANK));
3069
3070 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3071 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3072 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3073 NUM_BANKS(ADDR_SURF_16_BANK));
3074
3075 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3076 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3077 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3078 NUM_BANKS(ADDR_SURF_16_BANK));
3079
3080 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3081 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3082 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3083 NUM_BANKS(ADDR_SURF_16_BANK));
3084
3085 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3086 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3087 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3088 NUM_BANKS(ADDR_SURF_8_BANK));
3089
3090 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3091 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3092 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3093 NUM_BANKS(ADDR_SURF_4_BANK));
3094
3095 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3096 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3097 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3098 NUM_BANKS(ADDR_SURF_4_BANK));
3099
3100 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3101 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3102
3103 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3104 if (reg_offset != 7)
3105 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3106
3107 break;
3108 case CHIP_STONEY:
3109 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3110 PIPE_CONFIG(ADDR_SURF_P2) |
3111 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3112 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3113 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3114 PIPE_CONFIG(ADDR_SURF_P2) |
3115 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3116 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3117 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3118 PIPE_CONFIG(ADDR_SURF_P2) |
3119 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3120 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3121 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3122 PIPE_CONFIG(ADDR_SURF_P2) |
3123 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3124 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3125 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3126 PIPE_CONFIG(ADDR_SURF_P2) |
3127 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3128 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3129 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3130 PIPE_CONFIG(ADDR_SURF_P2) |
3131 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3132 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3133 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3134 PIPE_CONFIG(ADDR_SURF_P2) |
3135 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3136 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3137 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3138 PIPE_CONFIG(ADDR_SURF_P2));
3139 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3140 PIPE_CONFIG(ADDR_SURF_P2) |
3141 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3142 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3143 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3144 PIPE_CONFIG(ADDR_SURF_P2) |
3145 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3146 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3147 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3148 PIPE_CONFIG(ADDR_SURF_P2) |
3149 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3150 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3151 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3152 PIPE_CONFIG(ADDR_SURF_P2) |
3153 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3154 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3155 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3156 PIPE_CONFIG(ADDR_SURF_P2) |
3157 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3158 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3159 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3160 PIPE_CONFIG(ADDR_SURF_P2) |
3161 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3162 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3163 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3164 PIPE_CONFIG(ADDR_SURF_P2) |
3165 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3166 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3167 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3168 PIPE_CONFIG(ADDR_SURF_P2) |
3169 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3170 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3171 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3172 PIPE_CONFIG(ADDR_SURF_P2) |
3173 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3174 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3175 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3176 PIPE_CONFIG(ADDR_SURF_P2) |
3177 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3178 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3179 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3180 PIPE_CONFIG(ADDR_SURF_P2) |
3181 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3182 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3183 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3184 PIPE_CONFIG(ADDR_SURF_P2) |
3185 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3186 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3187 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3188 PIPE_CONFIG(ADDR_SURF_P2) |
3189 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3190 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3191 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3192 PIPE_CONFIG(ADDR_SURF_P2) |
3193 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3194 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3195 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3196 PIPE_CONFIG(ADDR_SURF_P2) |
3197 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3198 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3199 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3200 PIPE_CONFIG(ADDR_SURF_P2) |
3201 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3202 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3203 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3204 PIPE_CONFIG(ADDR_SURF_P2) |
3205 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3206 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3207 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3208 PIPE_CONFIG(ADDR_SURF_P2) |
3209 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3210 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3211
3212 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3213 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3214 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3215 NUM_BANKS(ADDR_SURF_8_BANK));
3216 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3217 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3218 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3219 NUM_BANKS(ADDR_SURF_8_BANK));
3220 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3221 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3222 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3223 NUM_BANKS(ADDR_SURF_8_BANK));
3224 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3225 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3226 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3227 NUM_BANKS(ADDR_SURF_8_BANK));
3228 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3229 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3230 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3231 NUM_BANKS(ADDR_SURF_8_BANK));
3232 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3233 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3234 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3235 NUM_BANKS(ADDR_SURF_8_BANK));
3236 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3237 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3238 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3239 NUM_BANKS(ADDR_SURF_8_BANK));
3240 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3241 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3242 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3243 NUM_BANKS(ADDR_SURF_16_BANK));
3244 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3245 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3246 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3247 NUM_BANKS(ADDR_SURF_16_BANK));
3248 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3249 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3250 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3251 NUM_BANKS(ADDR_SURF_16_BANK));
3252 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3253 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3254 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3255 NUM_BANKS(ADDR_SURF_16_BANK));
3256 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3257 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3258 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3259 NUM_BANKS(ADDR_SURF_16_BANK));
3260 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3261 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3262 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3263 NUM_BANKS(ADDR_SURF_16_BANK));
3264 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3265 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3266 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3267 NUM_BANKS(ADDR_SURF_8_BANK));
3268
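		/*
		 * Tile mode slots 7, 12, 17 and 23 (and macrotile slot 7) are
		 * never initialized above, so they are skipped and their
		 * registers keep the hardware defaults.
		 */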
3269 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3270 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3271 reg_offset != 23)
3272 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3273
3274 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3275 if (reg_offset != 7)
3276 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3277
3278 break;
3279 default:
3281		dev_warn(adev->dev,
3282			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init(), falling through to CHIP_CARRIZO\n",
3283			 adev->asic_type);
		/* fall through */
3284 case CHIP_CARRIZO:
3285 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3286 PIPE_CONFIG(ADDR_SURF_P2) |
3287 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3288 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3289 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3290 PIPE_CONFIG(ADDR_SURF_P2) |
3291 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3292 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3293 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3294 PIPE_CONFIG(ADDR_SURF_P2) |
3295 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3296 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3297 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3298 PIPE_CONFIG(ADDR_SURF_P2) |
3299 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3300 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3301 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3302 PIPE_CONFIG(ADDR_SURF_P2) |
3303 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3304 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3305 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3306 PIPE_CONFIG(ADDR_SURF_P2) |
3307 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3308 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3309 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3310 PIPE_CONFIG(ADDR_SURF_P2) |
3311 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3312 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3313 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3314 PIPE_CONFIG(ADDR_SURF_P2));
3315 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3316 PIPE_CONFIG(ADDR_SURF_P2) |
3317 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3318 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3319 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3320 PIPE_CONFIG(ADDR_SURF_P2) |
3321 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3322 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3323 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3324 PIPE_CONFIG(ADDR_SURF_P2) |
3325 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3326 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3327 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3328 PIPE_CONFIG(ADDR_SURF_P2) |
3329 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3330 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3331 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3332 PIPE_CONFIG(ADDR_SURF_P2) |
3333 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3334 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3335 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3336 PIPE_CONFIG(ADDR_SURF_P2) |
3337 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3338 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3339 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3340 PIPE_CONFIG(ADDR_SURF_P2) |
3341 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3342 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3343 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3344 PIPE_CONFIG(ADDR_SURF_P2) |
3345 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3346 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3347 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3348 PIPE_CONFIG(ADDR_SURF_P2) |
3349 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3350 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3351 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3352 PIPE_CONFIG(ADDR_SURF_P2) |
3353 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3354 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3355 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3356 PIPE_CONFIG(ADDR_SURF_P2) |
3357 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3358 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3359 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3360 PIPE_CONFIG(ADDR_SURF_P2) |
3361 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3362 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3363 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3364 PIPE_CONFIG(ADDR_SURF_P2) |
3365 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3366 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3367 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3368 PIPE_CONFIG(ADDR_SURF_P2) |
3369 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3370 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3371 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3372 PIPE_CONFIG(ADDR_SURF_P2) |
3373 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3374 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3375 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3376 PIPE_CONFIG(ADDR_SURF_P2) |
3377 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3378 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3379 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3380 PIPE_CONFIG(ADDR_SURF_P2) |
3381 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3382 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3383 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3384 PIPE_CONFIG(ADDR_SURF_P2) |
3385 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3386 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3387
3388 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3389 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3390 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3391 NUM_BANKS(ADDR_SURF_8_BANK));
3392 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3393 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3394 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3395 NUM_BANKS(ADDR_SURF_8_BANK));
3396 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3397 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3398 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3399 NUM_BANKS(ADDR_SURF_8_BANK));
3400 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3401 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3402 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3403 NUM_BANKS(ADDR_SURF_8_BANK));
3404 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3405 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3406 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3407 NUM_BANKS(ADDR_SURF_8_BANK));
3408 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3409 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3410 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3411 NUM_BANKS(ADDR_SURF_8_BANK));
3412 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3413 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3414 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3415 NUM_BANKS(ADDR_SURF_8_BANK));
3416 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3417 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3418 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3419 NUM_BANKS(ADDR_SURF_16_BANK));
3420 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3421 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3422 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3423 NUM_BANKS(ADDR_SURF_16_BANK));
3424 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3425 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3426 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3427 NUM_BANKS(ADDR_SURF_16_BANK));
3428 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3429 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3430 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3431 NUM_BANKS(ADDR_SURF_16_BANK));
3432 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3433 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3434 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3435 NUM_BANKS(ADDR_SURF_16_BANK));
3436 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3437 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3438 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3439 NUM_BANKS(ADDR_SURF_16_BANK));
3440 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3441 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3442 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3443 NUM_BANKS(ADDR_SURF_8_BANK));
3444
3445 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3446 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3447 reg_offset != 23)
3448 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3449
3450 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3451 if (reg_offset != 7)
3452 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3453
3454 break;
3455 }
3456}
3457
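/*
 * Route subsequent GRBM register accesses to one shader engine (se_num),
 * shader array (sh_num) and instance, or broadcast them to all of them when
 * 0xffffffff is passed. Callers hold grbm_idx_mutex around this and restore
 * broadcast mode when they are done.
 */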
3458static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3459 u32 se_num, u32 sh_num, u32 instance)
3460{
3461 u32 data;
3462
3463 if (instance == 0xffffffff)
3464 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3465 else
3466 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3467
3468 if (se_num == 0xffffffff)
3469 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
3470 else
3471 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
3472
3473 if (sh_num == 0xffffffff)
3474 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
3475 else
3476 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
3477
3478 WREG32(mmGRBM_GFX_INDEX, data);
3479}
3480
3481static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
3482 u32 me, u32 pipe, u32 q)
3483{
3484 vi_srbm_select(adev, me, pipe, q, 0);
3485}
3486
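/*
 * Both RB disable registers share the same field layout, so they can be OR'd
 * before BACKEND_DISABLE is extracted; inverting the result and masking it to
 * the number of backends per shader array yields a bitmap of the render
 * backends that are active on the currently selected SE/SH.
 */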
3487static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3488{
3489 u32 data, mask;
3490
3491 data = RREG32(mmCC_RB_BACKEND_DISABLE) |
3492 RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3493
3494 data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
3495
3496 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3497 adev->gfx.config.max_sh_per_se);
3498
3499 return (~data) & mask;
3500}
3501
3502static void
3503gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3504{
3505 switch (adev->asic_type) {
3506 case CHIP_FIJI:
3507 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3508 RB_XSEL2(1) | PKR_MAP(2) |
3509 PKR_XSEL(1) | PKR_YSEL(1) |
3510 SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3511 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3512 SE_PAIR_YSEL(2);
3513 break;
3514 case CHIP_TONGA:
3515 case CHIP_POLARIS10:
3516 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3517 SE_XSEL(1) | SE_YSEL(1);
3518 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3519 SE_PAIR_YSEL(2);
3520 break;
3521 case CHIP_TOPAZ:
3522 case CHIP_CARRIZO:
3523 *rconf |= RB_MAP_PKR0(2);
3524 *rconf1 |= 0x0;
3525 break;
3526 case CHIP_POLARIS11:
3527 case CHIP_POLARIS12:
3528 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3529 SE_XSEL(1) | SE_YSEL(1);
3530 *rconf1 |= 0x0;
3531 break;
3532 case CHIP_STONEY:
3533 *rconf |= 0x0;
3534 *rconf1 |= 0x0;
3535 break;
3536 default:
3537 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3538 break;
3539 }
3540}
3541
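/*
 * On parts with harvested (fused-off) render backends the default raster
 * config would route work to RBs that do not exist. Build a mask of the
 * surviving RBs per shader engine and patch the SE/PKR/RB map fields so
 * rasterization only targets live backends.
 */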
3542static void
3543gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3544 u32 raster_config, u32 raster_config_1,
3545 unsigned rb_mask, unsigned num_rb)
3546{
3547 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3548 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3549 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3550 unsigned rb_per_se = num_rb / num_se;
3551 unsigned se_mask[4];
3552 unsigned se;
3553
3554 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3555 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3556 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3557 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3558
3559 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3560 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3561 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3562
3563 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3564 (!se_mask[2] && !se_mask[3]))) {
3565 raster_config_1 &= ~SE_PAIR_MAP_MASK;
3566
3567 if (!se_mask[0] && !se_mask[1]) {
3568 raster_config_1 |=
3569 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3570 } else {
3571 raster_config_1 |=
3572 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3573 }
3574 }
3575
3576 for (se = 0; se < num_se; se++) {
3577 unsigned raster_config_se = raster_config;
3578 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3579 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3580 int idx = (se / 2) * 2;
3581
3582 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3583 raster_config_se &= ~SE_MAP_MASK;
3584
3585 if (!se_mask[idx]) {
3586 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3587 } else {
3588 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3589 }
3590 }
3591
3592 pkr0_mask &= rb_mask;
3593 pkr1_mask &= rb_mask;
3594 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3595 raster_config_se &= ~PKR_MAP_MASK;
3596
3597 if (!pkr0_mask) {
3598 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3599 } else {
3600 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3601 }
3602 }
3603
3604 if (rb_per_se >= 2) {
3605 unsigned rb0_mask = 1 << (se * rb_per_se);
3606 unsigned rb1_mask = rb0_mask << 1;
3607
3608 rb0_mask &= rb_mask;
3609 rb1_mask &= rb_mask;
3610 if (!rb0_mask || !rb1_mask) {
3611 raster_config_se &= ~RB_MAP_PKR0_MASK;
3612
3613 if (!rb0_mask) {
3614 raster_config_se |=
3615 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3616 } else {
3617 raster_config_se |=
3618 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3619 }
3620 }
3621
3622 if (rb_per_se > 2) {
3623 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3624 rb1_mask = rb0_mask << 1;
3625 rb0_mask &= rb_mask;
3626 rb1_mask &= rb_mask;
3627 if (!rb0_mask || !rb1_mask) {
3628 raster_config_se &= ~RB_MAP_PKR1_MASK;
3629
3630 if (!rb0_mask) {
3631 raster_config_se |=
3632 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3633 } else {
3634 raster_config_se |=
3635 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3636 }
3637 }
3638 }
3639 }
3640
3641 /* GRBM_GFX_INDEX has a different offset on VI */
3642 gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
3643 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3644 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3645 }
3646
3647 /* GRBM_GFX_INDEX has a different offset on VI */
3648 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3649}
3650
3651static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3652{
3653 int i, j;
3654 u32 data;
3655 u32 raster_config = 0, raster_config_1 = 0;
3656 u32 active_rbs = 0;
3657 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3658 adev->gfx.config.max_sh_per_se;
3659 unsigned num_rb_pipes;
3660
3661 mutex_lock(&adev->grbm_idx_mutex);
3662 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3663 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3664 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3665 data = gfx_v8_0_get_rb_active_bitmap(adev);
3666 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3667 rb_bitmap_width_per_sh);
3668 }
3669 }
3670 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3671
3672 adev->gfx.config.backend_enable_mask = active_rbs;
3673 adev->gfx.config.num_rbs = hweight32(active_rbs);
3674
3675 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3676 adev->gfx.config.max_shader_engines, 16);
3677
3678 gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3679
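	/*
	 * Program the golden raster config directly when there is nothing to
	 * compensate for (no RB bitmap reported, or at least as many active
	 * RBs as RB pipes); otherwise derive per-SE harvested configs.
	 */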
3680 if (!adev->gfx.config.backend_enable_mask ||
3681 adev->gfx.config.num_rbs >= num_rb_pipes) {
3682 WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3683 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3684 } else {
3685 gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3686 adev->gfx.config.backend_enable_mask,
3687 num_rb_pipes);
3688 }
3689
3690 /* cache the values for userspace */
3691 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3692 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3693 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3694 adev->gfx.config.rb_config[i][j].rb_backend_disable =
3695 RREG32(mmCC_RB_BACKEND_DISABLE);
3696 adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
3697 RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3698 adev->gfx.config.rb_config[i][j].raster_config =
3699 RREG32(mmPA_SC_RASTER_CONFIG);
3700 adev->gfx.config.rb_config[i][j].raster_config_1 =
3701 RREG32(mmPA_SC_RASTER_CONFIG_1);
3702 }
3703 }
3704 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3705 mutex_unlock(&adev->grbm_idx_mutex);
3706}
3707
3708/**
3709 * gfx_v8_0_init_compute_vmid - initialize the compute VMIDs
3710 *
3711 * @adev: amdgpu_device pointer
3712 *
3713 * Initialize the SH_MEM registers (config, bases and the APE1 aperture)
3714 * for the compute VMIDs.
3715 */
3716#define DEFAULT_SH_MEM_BASES (0x6000)
3717#define FIRST_COMPUTE_VMID (8)
3718#define LAST_COMPUTE_VMID (16)
3719static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
3720{
3721 int i;
3722 uint32_t sh_mem_config;
3723 uint32_t sh_mem_bases;
3724
3725 /*
3726 * Configure apertures:
3727 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
3728 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
3729 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
3730 */
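	/* Each 16-bit field of SH_MEM_BASES holds bits 63:48 of an aperture
	 * base, so 0x6000 in both fields yields the 0x6000'0000'00000000
	 * bases listed above.
	 */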
3731 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
3732
3733 sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
3734 SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
3735 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
3736 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
3737 MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
3738 SH_MEM_CONFIG__PRIVATE_ATC_MASK;
3739
3740 mutex_lock(&adev->srbm_mutex);
3741 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
3742 vi_srbm_select(adev, 0, 0, 0, i);
3743 /* CP and shaders */
3744 WREG32(mmSH_MEM_CONFIG, sh_mem_config);
3745 WREG32(mmSH_MEM_APE1_BASE, 1);
3746 WREG32(mmSH_MEM_APE1_LIMIT, 0);
3747 WREG32(mmSH_MEM_BASES, sh_mem_bases);
3748 }
3749 vi_srbm_select(adev, 0, 0, 0, 0);
3750 mutex_unlock(&adev->srbm_mutex);
3751}
3752
3753static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3754{
3755 switch (adev->asic_type) {
3756 default:
3757 adev->gfx.config.double_offchip_lds_buf = 1;
3758 break;
3759 case CHIP_CARRIZO:
3760 case CHIP_STONEY:
3761 adev->gfx.config.double_offchip_lds_buf = 0;
3762 break;
3763 }
3764}
3765
3766static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
3767{
3768 u32 tmp, sh_static_mem_cfg;
3769 int i;
3770
3771 WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
3772 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3773 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3774 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
3775
3776 gfx_v8_0_tiling_mode_table_init(adev);
3777 gfx_v8_0_setup_rb(adev);
3778 gfx_v8_0_get_cu_info(adev);
3779 gfx_v8_0_config_init(adev);
3780
3781 /* XXX SH_MEM regs */
3782 /* where to put LDS, scratch, GPUVM in FSA64 space */
3783 sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
3784 SWIZZLE_ENABLE, 1);
3785 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3786 ELEMENT_SIZE, 1);
3787 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3788 INDEX_STRIDE, 3);
3789 WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
3790
3791 mutex_lock(&adev->srbm_mutex);
3792 for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
3793 vi_srbm_select(adev, 0, 0, 0, i);
3794 /* CP and shaders */
3795 if (i == 0) {
3796 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
3797 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3798 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3799 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3800 WREG32(mmSH_MEM_CONFIG, tmp);
3801 WREG32(mmSH_MEM_BASES, 0);
3802 } else {
3803 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
3804 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3805 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3806 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3807 WREG32(mmSH_MEM_CONFIG, tmp);
3808 tmp = adev->gmc.shared_aperture_start >> 48;
3809 WREG32(mmSH_MEM_BASES, tmp);
3810 }
3811
3812 WREG32(mmSH_MEM_APE1_BASE, 1);
3813 WREG32(mmSH_MEM_APE1_LIMIT, 0);
3814 }
3815 vi_srbm_select(adev, 0, 0, 0, 0);
3816 mutex_unlock(&adev->srbm_mutex);
3817
3818 gfx_v8_0_init_compute_vmid(adev);
3819
3820 mutex_lock(&adev->grbm_idx_mutex);
3821 /*
3822	 * making sure that the following register writes will be broadcast
3823 * to all the shaders
3824 */
3825 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3826
3827 WREG32(mmPA_SC_FIFO_SIZE,
3828 (adev->gfx.config.sc_prim_fifo_size_frontend <<
3829 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
3830 (adev->gfx.config.sc_prim_fifo_size_backend <<
3831 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
3832 (adev->gfx.config.sc_hiz_tile_fifo_size <<
3833 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
3834 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
3835 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
3836
3837 tmp = RREG32(mmSPI_ARB_PRIORITY);
3838 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
3839 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
3840 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
3841 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
3842 WREG32(mmSPI_ARB_PRIORITY, tmp);
3843
3844 mutex_unlock(&adev->grbm_idx_mutex);
3846 }
3847
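/*
 * Poll the RLC serdes busy bits until every CU master on every SE/SH reports
 * idle, then do the same for the non-CU masters; the RLC must be quiescent
 * before it is stopped or reconfigured.
 */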
3848static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3849{
3850 u32 i, j, k;
3851 u32 mask;
3852
3853 mutex_lock(&adev->grbm_idx_mutex);
3854 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3855 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3856 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3857 for (k = 0; k < adev->usec_timeout; k++) {
3858 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3859 break;
3860 udelay(1);
3861 }
3862 if (k == adev->usec_timeout) {
3863 gfx_v8_0_select_se_sh(adev, 0xffffffff,
3864 0xffffffff, 0xffffffff);
3865 mutex_unlock(&adev->grbm_idx_mutex);
3866				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
3867					 i, j);
3868 return;
3869 }
3870 }
3871 }
3872 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3873 mutex_unlock(&adev->grbm_idx_mutex);
3874
3875 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3876 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3877 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3878 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3879 for (k = 0; k < adev->usec_timeout; k++) {
3880 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3881 break;
3882 udelay(1);
3883 }
3884}
3885
3886static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3887 bool enable)
3888{
3889 u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3890
3891 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3892 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3893 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3894 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3895
3896 WREG32(mmCP_INT_CNTL_RING0, tmp);
3897}
3898
3899static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
3900{
3901	/* program the clear state indirect buffer (CSIB) address and size */
3902 WREG32(mmRLC_CSIB_ADDR_HI,
3903 adev->gfx.rlc.clear_state_gpu_addr >> 32);
3904 WREG32(mmRLC_CSIB_ADDR_LO,
3905 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3906 WREG32(mmRLC_CSIB_LENGTH,
3907 adev->gfx.rlc.clear_state_size);
3908}
3909
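/*
 * Preprocess the RLC indirect register list: record the start offset of every
 * 0xFFFFFFFF-delimited entry, deduplicate the indexed register values into
 * unique_indices, and rewrite each value in place with its index into that
 * table.
 */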
3910static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
3911 int ind_offset,
3912 int list_size,
3913 int *unique_indices,
3914 int *indices_count,
3915 int max_indices,
3916 int *ind_start_offsets,
3917 int *offset_count,
3918 int max_offset)
3919{
3920 int indices;
3921 bool new_entry = true;
3922
3923 for (; ind_offset < list_size; ind_offset++) {
3924
3925 if (new_entry) {
3926 new_entry = false;
3927 ind_start_offsets[*offset_count] = ind_offset;
3928 *offset_count = *offset_count + 1;
3929 BUG_ON(*offset_count >= max_offset);
3930 }
3931
3932 if (register_list_format[ind_offset] == 0xFFFFFFFF) {
3933 new_entry = true;
3934 continue;
3935 }
3936
3937 ind_offset += 2;
3938
3939		/* look for a matching index */
3940 for (indices = 0;
3941 indices < *indices_count;
3942 indices++) {
3943 if (unique_indices[indices] ==
3944 register_list_format[ind_offset])
3945 break;
3946 }
3947
3948 if (indices >= *indices_count) {
3949 unique_indices[*indices_count] =
3950 register_list_format[ind_offset];
3951 indices = *indices_count;
3952 *indices_count = *indices_count + 1;
3953 BUG_ON(*indices_count >= max_indices);
3954 }
3955
3956 register_list_format[ind_offset] = indices;
3957 }
3958}
3959
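/*
 * Upload the RLC save/restore machinery: the direct register list goes to SRM
 * ARAM, the preprocessed indirect list and its entry offsets go to GPM
 * scratch, and the deduplicated index registers are programmed into the SRM
 * index control slots.
 */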
3960static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3961{
3962 int i, temp, data;
3963 int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
3964 int indices_count = 0;
3965 int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3966 int offset_count = 0;
3967
3968 int list_size;
3969 unsigned int *register_list_format =
3970 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
3971 if (!register_list_format)
3972 return -ENOMEM;
3973 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
3974 adev->gfx.rlc.reg_list_format_size_bytes);
3975
3976 gfx_v8_0_parse_ind_reg_list(register_list_format,
3977 RLC_FormatDirectRegListLength,
3978 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3979 unique_indices,
3980 &indices_count,
3981 ARRAY_SIZE(unique_indices),
3982 indirect_start_offsets,
3983 &offset_count,
3984 ARRAY_SIZE(indirect_start_offsets));
3985
3986 /* save and restore list */
3987 WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
3988
3989 WREG32(mmRLC_SRM_ARAM_ADDR, 0);
3990 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
3991 WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
3992
3993 /* indirect list */
3994 WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
3995 for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
3996 WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
3997
3998 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
3999 list_size = list_size >> 1;
4000 WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
4001 WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
4002
4003	/* start offsets of the indirect list entries */
4004 WREG32(mmRLC_GPM_SCRATCH_ADDR,
4005 adev->gfx.rlc.starting_offsets_start);
4006 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
4007 WREG32(mmRLC_GPM_SCRATCH_DATA,
4008 indirect_start_offsets[i]);
4009
4010 /* unique indices */
4011 temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
4012 data = mmRLC_SRM_INDEX_CNTL_DATA_0;
4013 for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
4014 if (unique_indices[i] != 0) {
4015 WREG32(temp + i, unique_indices[i] & 0x3FFFF);
4016 WREG32(data + i, unique_indices[i] >> 20);
4017 }
4018 }
4019 kfree(register_list_format);
4020
4021 return 0;
4022}
4023
4024static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
4025{
4026 WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
4027}
4028
4029static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
4030{
4031 uint32_t data;
4032
4033 WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
4034
4035 data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
4036 data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
4037 data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
4038 data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
4039 WREG32(mmRLC_PG_DELAY, data);
4040
4041 WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
4042 WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
4044 }
4045
4046static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
4047 bool enable)
4048{
4049 WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
4050}
4051
4052static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
4053 bool enable)
4054{
4055 WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
4056}
4057
4058static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
4059{
4060 WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
4061}
4062
4063static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4064{
4065 if ((adev->asic_type == CHIP_CARRIZO) ||
4066 (adev->asic_type == CHIP_STONEY)) {
4067 gfx_v8_0_init_csb(adev);
4068 gfx_v8_0_init_save_restore_list(adev);
4069 gfx_v8_0_enable_save_restore_machine(adev);
4070 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4071 gfx_v8_0_init_power_gating(adev);
4072 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4073 } else if ((adev->asic_type == CHIP_POLARIS11) ||
4074 (adev->asic_type == CHIP_POLARIS12)) {
4075 gfx_v8_0_init_csb(adev);
4076 gfx_v8_0_init_save_restore_list(adev);
4077 gfx_v8_0_enable_save_restore_machine(adev);
4078 gfx_v8_0_init_power_gating(adev);
4079 }
4081 }
4082
4083static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
4084{
4085 WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
4086
4087 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4088 gfx_v8_0_wait_for_rlc_serdes(adev);
4089}
4090
4091static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
4092{
4093 WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4094 udelay(50);
4095
4096 WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
4097 udelay(50);
4098}
4099
4100static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
4101{
4102 WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
4103
4104	/* on APUs such as Carrizo, the CP interrupt is enabled only after the CP is initialized */
4105 if (!(adev->flags & AMD_IS_APU))
4106 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4107
4108 udelay(50);
4109}
4110
4111static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
4112{
4113 const struct rlc_firmware_header_v2_0 *hdr;
4114 const __le32 *fw_data;
4115 unsigned i, fw_size;
4116
4117 if (!adev->gfx.rlc_fw)
4118 return -EINVAL;
4119
4120 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
4121 amdgpu_ucode_print_rlc_hdr(&hdr->header);
4122
4123 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
4124 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
4125 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
4126
4127 WREG32(mmRLC_GPM_UCODE_ADDR, 0);
4128 for (i = 0; i < fw_size; i++)
4129 WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
4130 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
4131
4132 return 0;
4133}
4134
4135static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4136{
4137 int r;
4138 u32 tmp;
4139
4140 gfx_v8_0_rlc_stop(adev);
4141
4142 /* disable CG */
4143 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
4144 tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
4145 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4146 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
4147 if (adev->asic_type == CHIP_POLARIS11 ||
4148 adev->asic_type == CHIP_POLARIS10 ||
4149 adev->asic_type == CHIP_POLARIS12) {
4150 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
4151 tmp &= ~0x3;
4152 WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
4153 }
4154
4155 /* disable PG */
4156 WREG32(mmRLC_PG_CNTL, 0);
4157
4158 gfx_v8_0_rlc_reset(adev);
4159 gfx_v8_0_init_pg(adev);
4161
4162 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4163 /* legacy rlc firmware loading */
4164 r = gfx_v8_0_rlc_load_microcode(adev);
4165 if (r)
4166 return r;
4167 }
4168
4169 gfx_v8_0_rlc_start(adev);
4170
4171 return 0;
4172}
4173
4174static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4175{
4176 int i;
4177 u32 tmp = RREG32(mmCP_ME_CNTL);
4178
4179 if (enable) {
4180 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4181 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4182 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4183 } else {
4184 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4185 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4186 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
4187 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4188 adev->gfx.gfx_ring[i].ready = false;
4189 }
4190 WREG32(mmCP_ME_CNTL, tmp);
4191 udelay(50);
4192}
4193
4194static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
4195{
4196 const struct gfx_firmware_header_v1_0 *pfp_hdr;
4197 const struct gfx_firmware_header_v1_0 *ce_hdr;
4198 const struct gfx_firmware_header_v1_0 *me_hdr;
4199 const __le32 *fw_data;
4200 unsigned i, fw_size;
4201
4202 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
4203 return -EINVAL;
4204
4205 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
4206 adev->gfx.pfp_fw->data;
4207 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
4208 adev->gfx.ce_fw->data;
4209 me_hdr = (const struct gfx_firmware_header_v1_0 *)
4210 adev->gfx.me_fw->data;
4211
4212 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
4213 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
4214 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
4215
4216 gfx_v8_0_cp_gfx_enable(adev, false);
4217
4218 /* PFP */
4219 fw_data = (const __le32 *)
4220 (adev->gfx.pfp_fw->data +
4221 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
4222 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
4223 WREG32(mmCP_PFP_UCODE_ADDR, 0);
4224 for (i = 0; i < fw_size; i++)
4225 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
4226 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
4227
4228 /* CE */
4229 fw_data = (const __le32 *)
4230 (adev->gfx.ce_fw->data +
4231 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
4232 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
4233 WREG32(mmCP_CE_UCODE_ADDR, 0);
4234 for (i = 0; i < fw_size; i++)
4235 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
4236 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
4237
4238 /* ME */
4239 fw_data = (const __le32 *)
4240 (adev->gfx.me_fw->data +
4241 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
4242 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
4243 WREG32(mmCP_ME_RAM_WADDR, 0);
4244 for (i = 0; i < fw_size; i++)
4245 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
4246 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
4247
4248 return 0;
4249}
4250
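/*
 * Size, in dwords, of the clear state buffer emitted by
 * gfx_v8_0_cp_gfx_start(): PREAMBLE begin/end, CONTEXT_CONTROL, one
 * SET_CONTEXT_REG packet per context extent, the raster config pair and the
 * trailing CLEAR_STATE packet. Keep the two functions in sync.
 */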
4251static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4252{
4253 u32 count = 0;
4254 const struct cs_section_def *sect = NULL;
4255 const struct cs_extent_def *ext = NULL;
4256
4257 /* begin clear state */
4258 count += 2;
4259 /* context control state */
4260 count += 3;
4261
4262 for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4263 for (ext = sect->section; ext->extent != NULL; ++ext) {
4264 if (sect->id == SECT_CONTEXT)
4265 count += 2 + ext->reg_count;
4266 else
4267 return 0;
4268 }
4269 }
4270 /* pa_sc_raster_config/pa_sc_raster_config1 */
4271 count += 4;
4272 /* end clear state */
4273 count += 2;
4274 /* clear state */
4275 count += 2;
4276
4277 return count;
4278}
4279
4280static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
4281{
4282 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
4283 const struct cs_section_def *sect = NULL;
4284 const struct cs_extent_def *ext = NULL;
4285 int r, i;
4286
4287 /* init the CP */
4288 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
4289 WREG32(mmCP_ENDIAN_SWAP, 0);
4290 WREG32(mmCP_DEVICE_ID, 1);
4291
4292 gfx_v8_0_cp_gfx_enable(adev, true);
4293
4294 r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
4295 if (r) {
4296 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
4297 return r;
4298 }
4299
4300 /* clear state buffer */
4301 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4302 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4303
4304 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4305 amdgpu_ring_write(ring, 0x80000000);
4306 amdgpu_ring_write(ring, 0x80000000);
4307
4308 for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4309 for (ext = sect->section; ext->extent != NULL; ++ext) {
4310 if (sect->id == SECT_CONTEXT) {
4311 amdgpu_ring_write(ring,
4312 PACKET3(PACKET3_SET_CONTEXT_REG,
4313 ext->reg_count));
4314 amdgpu_ring_write(ring,
4315 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4316 for (i = 0; i < ext->reg_count; i++)
4317 amdgpu_ring_write(ring, ext->extent[i]);
4318 }
4319 }
4320 }
4321
4322 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4323 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4324 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4325 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
4326
4327 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4328 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4329
4330 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4331 amdgpu_ring_write(ring, 0);
4332
4333 /* init the CE partitions */
4334 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4335 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4336 amdgpu_ring_write(ring, 0x8000);
4337 amdgpu_ring_write(ring, 0x8000);
4338
4339 amdgpu_ring_commit(ring);
4340
4341 return 0;
4342}

4343 static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
4344{
4345 u32 tmp;
4346 /* no gfx doorbells on iceland */
4347 if (adev->asic_type == CHIP_TOPAZ)
4348 return;
4349
4350 tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
4351
4352 if (ring->use_doorbell) {
4353 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4354 DOORBELL_OFFSET, ring->doorbell_index);
4355 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4356 DOORBELL_HIT, 0);
4357 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4358 DOORBELL_EN, 1);
4359 } else {
4360 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
4361 }
4362
4363 WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
4364
4365 if (adev->flags & AMD_IS_APU)
4366 return;
4367
4368 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
4369 DOORBELL_RANGE_LOWER,
4370 AMDGPU_DOORBELL_GFX_RING0);
4371 WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
4372
4373 WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
4374 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
4375}
4376
4377static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4378{
4379 struct amdgpu_ring *ring;
4380 u32 tmp;
4381 u32 rb_bufsz;
4382 u64 rb_addr, rptr_addr, wptr_gpu_addr;
4383 int r;
4384
4385 /* Set the write pointer delay */
4386 WREG32(mmCP_RB_WPTR_DELAY, 0);
4387
4388 /* set the RB to use vmid 0 */
4389 WREG32(mmCP_RB_VMID, 0);
4390
4391 /* Set ring buffer size */
4392 ring = &adev->gfx.gfx_ring[0];
4393 rb_bufsz = order_base_2(ring->ring_size / 8);
4394 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
4395 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
4396 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
4397 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
4398#ifdef __BIG_ENDIAN
4399 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
4400#endif
4401 WREG32(mmCP_RB0_CNTL, tmp);
4402
4403 /* Initialize the ring buffer's read and write pointers */
4404 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
4405 ring->wptr = 0;
4406 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4407
4408	/* set the wb address whether it's enabled or not */
4409 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4410 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
4411 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
4412
4413 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4414 WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
4415 WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
4416 mdelay(1);
4417 WREG32(mmCP_RB0_CNTL, tmp);
4418
4419 rb_addr = ring->gpu_addr >> 8;
4420 WREG32(mmCP_RB0_BASE, rb_addr);
4421 WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
4422
4423 gfx_v8_0_set_cpg_door_bell(adev, ring);
4424 /* start the ring */
4425 amdgpu_ring_clear_ring(ring);
4426 gfx_v8_0_cp_gfx_start(adev);
4427 ring->ready = true;
4428 r = amdgpu_ring_test_ring(ring);
4429 if (r)
4430 ring->ready = false;
4431
4432 return r;
4433}
4434
4435static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4436{
4437 int i;
4438
4439 if (enable) {
4440 WREG32(mmCP_MEC_CNTL, 0);
4441 } else {
4442 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4443 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4444 adev->gfx.compute_ring[i].ready = false;
4445 adev->gfx.kiq.ring.ready = false;
4446 }
4447 udelay(50);
4448}
4449
4450static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
4451{
4452 const struct gfx_firmware_header_v1_0 *mec_hdr;
4453 const __le32 *fw_data;
4454 unsigned i, fw_size;
4455
4456 if (!adev->gfx.mec_fw)
4457 return -EINVAL;
4458
4459 gfx_v8_0_cp_compute_enable(adev, false);
4460
4461 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
4462 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
4463
4464 fw_data = (const __le32 *)
4465 (adev->gfx.mec_fw->data +
4466 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
4467 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
4468
4469 /* MEC1 */
4470 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
4471 for (i = 0; i < fw_size; i++)
4472 WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
4473 WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
4474
4475 /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
4476 if (adev->gfx.mec2_fw) {
4477 const struct gfx_firmware_header_v1_0 *mec2_hdr;
4478
4479 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
4480 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
4481
4482 fw_data = (const __le32 *)
4483 (adev->gfx.mec2_fw->data +
4484 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
4485 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
4486
4487 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
4488 for (i = 0; i < fw_size; i++)
4489 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
4490 WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
4491 }
4492
4493 return 0;
4494}
4495
4496/* KIQ functions */
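/*
 * The kernel interface queue (KIQ) is a privileged compute queue owned by the
 * driver; rather than programming HQD registers directly, the other compute
 * queues are mapped by submitting MAP_QUEUES packets to the KIQ.
 */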
4497static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
4498{
4499 uint32_t tmp;
4500 struct amdgpu_device *adev = ring->adev;
4501
4502	/* tell the RLC which queue is the KIQ */
4503 tmp = RREG32(mmRLC_CP_SCHEDULERS);
4504 tmp &= 0xffffff00;
4505 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4506 WREG32(mmRLC_CP_SCHEDULERS, tmp);
4507 tmp |= 0x80;
4508 WREG32(mmRLC_CP_SCHEDULERS, tmp);
4509}
4510
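/*
 * Enable all kernel compute queues through the KIQ: build a mask of the
 * queues the driver owns, emit one SET_RESOURCES packet plus a MAP_QUEUES
 * packet per ring, then poll a scratch register to confirm the KIQ has
 * processed the submission.
 */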
4511static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4512{
4513 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4514 uint32_t scratch, tmp = 0;
4515 uint64_t queue_mask = 0;
4516 int r, i;
4517
4518 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
4519 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
4520 continue;
4521
4522 /* This situation may be hit in the future if a new HW
4523 * generation exposes more than 64 queues. If so, the
4524 * definition of queue_mask needs updating */
4525 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
4526 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
4527 break;
4528 }
4529
4530 queue_mask |= (1ull << i);
4531 }
4532
4533 r = amdgpu_gfx_scratch_get(adev, &scratch);
4534 if (r) {
4535 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
4536 return r;
4537 }
4538 WREG32(scratch, 0xCAFEDEAD);
4539
4540 r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 11);
4541 if (r) {
4542 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4543 amdgpu_gfx_scratch_free(adev, scratch);
4544 return r;
4545 }
4546 /* set resources */
4547 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
4548 amdgpu_ring_write(kiq_ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */
4549 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
4550 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
4551 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
4552 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
4553 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
4554 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
4555 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4556 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4557 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
4558 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4559
4560 /* map queues */
4561 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
4562		/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
4563 amdgpu_ring_write(kiq_ring,
4564 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
4565 amdgpu_ring_write(kiq_ring,
4566 PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
4567 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
4568 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
4569 PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
4570 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
4571 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
4572 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
4573 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4574 }
4575 /* write to scratch for completion */
4576 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
4577 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
4578 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
4579 amdgpu_ring_commit(kiq_ring);
4580
4581 for (i = 0; i < adev->usec_timeout; i++) {
4582 tmp = RREG32(scratch);
4583 if (tmp == 0xDEADBEEF)
4584 break;
4585 DRM_UDELAY(1);
4586 }
4587 if (i >= adev->usec_timeout) {
4588 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
4589 scratch, tmp);
4590 r = -EINVAL;
4591 }
4592 amdgpu_gfx_scratch_free(adev, scratch);
4593
4594 return r;
4595}
4596
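/*
 * Ask the CP to dequeue the currently selected HQD (req encodes the dequeue
 * mode) and busy-wait for its ACTIVE bit to clear before zeroing the queue
 * pointers; the caller selects the queue via vi_srbm_select() beforehand.
 */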
4597static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
4598{
4599 int i, r = 0;
4600
4601 if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4602 WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
4603 for (i = 0; i < adev->usec_timeout; i++) {
4604 if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4605 break;
4606 udelay(1);
4607 }
4608 if (i == adev->usec_timeout)
4609 r = -ETIMEDOUT;
4610 }
4611 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
4612 WREG32(mmCP_HQD_PQ_RPTR, 0);
4613 WREG32(mmCP_HQD_PQ_WPTR, 0);
4614
4615 return r;
4616}
4617
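/*
 * Fill the memory queue descriptor (MQD) for a compute ring. The MQD mirrors
 * the CP_HQD_* registers; gfx_v8_0_mqd_commit() or the KIQ's MAP_QUEUES later
 * transfers these values into a hardware queue slot.
 */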
4618static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4619{
4620 struct amdgpu_device *adev = ring->adev;
4621 struct vi_mqd *mqd = ring->mqd_ptr;
4622 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4623 uint32_t tmp;
4624
4625 mqd->header = 0xC0310800;
4626 mqd->compute_pipelinestat_enable = 0x00000001;
4627 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4628 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4629 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4630 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4631 mqd->compute_misc_reserved = 0x00000003;
4632 mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
4633 + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4634 mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
4635 + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4636 eop_base_addr = ring->eop_gpu_addr >> 8;
4637 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4638 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4639
4640 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4641 tmp = RREG32(mmCP_HQD_EOP_CONTROL);
4642 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4643 (order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
4644
4645 mqd->cp_hqd_eop_control = tmp;
4646
4647 /* enable doorbell? */
4648 tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4649 CP_HQD_PQ_DOORBELL_CONTROL,
4650 DOORBELL_EN,
4651 ring->use_doorbell ? 1 : 0);
4652
4653 mqd->cp_hqd_pq_doorbell_control = tmp;
4654
4655 /* set the pointer to the MQD */
4656 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
4657 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
4658
4659 /* set MQD vmid to 0 */
4660 tmp = RREG32(mmCP_MQD_CONTROL);
4661 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4662 mqd->cp_mqd_control = tmp;
4663
4664	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4665 hqd_gpu_addr = ring->gpu_addr >> 8;
4666 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4667 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4668
4669 /* set up the HQD, this is similar to CP_RB0_CNTL */
4670 tmp = RREG32(mmCP_HQD_PQ_CONTROL);
4671 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4672 (order_base_2(ring->ring_size / 4) - 1));
4673 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4674 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
4675#ifdef __BIG_ENDIAN
4676 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
4677#endif
4678 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4679 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
4680 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4681 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4682 mqd->cp_hqd_pq_control = tmp;
4683
4684 /* set the wb address whether it's enabled or not */
4685 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4686 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4687 mqd->cp_hqd_pq_rptr_report_addr_hi =
4688 upper_32_bits(wb_gpu_addr) & 0xffff;
4689
4690	/* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
4691 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4692 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4693 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4694
4695 tmp = 0;
4696 /* enable the doorbell if requested */
4697 if (ring->use_doorbell) {
4698 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
4699 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4700 DOORBELL_OFFSET, ring->doorbell_index);
4701
4702 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4703 DOORBELL_EN, 1);
4704 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4705 DOORBELL_SOURCE, 0);
4706 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4707 DOORBELL_HIT, 0);
4708 }
4709
4710 mqd->cp_hqd_pq_doorbell_control = tmp;
4711
4712 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4713 ring->wptr = 0;
4714 mqd->cp_hqd_pq_wptr = ring->wptr;
4715 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
4716
4717 /* set the vmid for the queue */
4718 mqd->cp_hqd_vmid = 0;
4719
4720 tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
4721 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
4722 mqd->cp_hqd_persistent_state = tmp;
4723
4724	/* set MTYPE and minimum IB availability size */
4725 tmp = RREG32(mmCP_HQD_IB_CONTROL);
4726 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4727 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
4728 mqd->cp_hqd_ib_control = tmp;
4729
4730 tmp = RREG32(mmCP_HQD_IQ_TIMER);
4731 tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
4732 mqd->cp_hqd_iq_timer = tmp;
4733
4734 tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
4735 tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
4736 mqd->cp_hqd_ctx_save_control = tmp;
4737
4738 /* defaults */
4739 mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
4740 mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
4741 mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
4742 mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
4743 mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
4744 mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
4745 mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
4746 mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
4747 mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
4748 mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
4749 mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
4750 mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
4751 mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
4752 mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
4753 mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
4754
4755 /* activate the queue */
4756 mqd->cp_hqd_active = 1;
4757
4758 return 0;
4759}
4760
int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
			struct vi_mqd *mqd)
{
	uint32_t mqd_reg;
	uint32_t *mqd_data;

	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
	mqd_data = &mqd->cp_mqd_base_addr_lo;

	/* disable wptr polling */
	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* program all HQD registers */
	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (adev->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
	}

	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* activate the HQD */
	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	return 0;
}

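/**
 * gfx_v8_0_kiq_init_queue - initialize the KIQ ring's MQD
 *
 * @ring: the KIQ ring
 *
 * On first init the MQD is built from scratch and backed up; on GPU reset
 * it is restored from the backup and recommitted so the kernel interface
 * queue comes back in a known-good state.
 */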
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v8_0_kiq_setting(ring);

	if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
	}

	return 0;
}

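/**
 * gfx_v8_0_kcq_init_queue - initialize a compute ring's MQD
 *
 * @ring: the compute ring
 *
 * Unlike the KIQ path, the MQD is only initialized (and backed up) here;
 * it is not committed via MMIO. The queue is mapped later through the KIQ
 * in gfx_v8_0_kiq_kcq_enable(). On GPU reset the MQD is restored from the
 * backup and the ring buffer is cleared.
 */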
static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}
	return 0;
}

static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
	if (adev->asic_type > CHIP_TONGA) {
		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
	}
	/* enable doorbells */
	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
}

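/**
 * gfx_v8_0_kiq_resume - bring up the KIQ and all compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the compute MEs, initializes the KIQ and KCQ MQDs from their
 * pinned BOs, programs the MEC doorbell aperture, maps the compute queues
 * through the KIQ and then ring-tests the KIQ and every KCQ.
 */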
static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v8_0_cp_compute_enable(adev, true);

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		goto done;

	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
	if (!r) {
		r = gfx_v8_0_kiq_init_queue(ring);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
		if (!r) {
			r = gfx_v8_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	gfx_v8_0_set_mec_doorbell_range(adev);

	r = gfx_v8_0_kiq_kcq_enable(adev);
	if (r)
		goto done;

	/* Test KIQ */
	ring = &adev->gfx.kiq.ring;
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	/* Test KCQs */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

done:
	return r;
}

static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v8_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v8_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	r = gfx_v8_0_cp_gfx_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_kiq_resume(adev);
	if (r)
		return r;

	gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v8_0_cp_gfx_enable(adev, enable);
	gfx_v8_0_cp_compute_enable(adev, enable);
}

static int gfx_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v8_0_init_golden_registers(adev);
	gfx_v8_0_gpu_init(adev);

	r = gfx_v8_0_rlc_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_resume(adev);

	return r;
}

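/**
 * gfx_v8_0_kcq_disable - unmap a compute queue via the KIQ
 *
 * @kiq_ring: KIQ ring used to submit the UNMAP_QUEUES packet
 * @ring: compute ring to unmap
 *
 * Emits an UNMAP_QUEUES packet followed by a scratch register write, then
 * polls the scratch value to confirm the KIQ has processed the request.
 */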
static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t scratch, tmp = 0;
	int r, i;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(kiq_ring, 10);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	/* unmap queues */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
			PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
			PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, 0);
	amdgpu_ring_write(kiq_ring, 0);
	amdgpu_ring_write(kiq_ring, 0);
	/* write to scratch for completion */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
	amdgpu_ring_commit(kiq_ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i >= adev->usec_timeout) {
		DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

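/**
 * gfx_v8_0_hw_fini - tear down the GFX block
 *
 * @handle: amdgpu_device pointer cast to void
 *
 * Releases the privileged-op interrupts, unmaps every compute queue and,
 * for bare-metal, halts the CP and RLC and ungates GFX power. SR-IOV
 * clients skip the hardware teardown since the host owns the engine state.
 */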
static int gfx_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	/* disable the KCQs so the CPC stops touching memory that is about to go away */
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);

	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}
	gfx_v8_0_cp_enable(adev, false);
	gfx_v8_0_rlc_stop(adev);

	amdgpu_device_ip_set_powergating_state(adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);

	return 0;
}

static int gfx_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.in_suspend = true;
	return gfx_v8_0_hw_fini(adev);
}

static int gfx_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gfx_v8_0_hw_init(adev);
	adev->gfx.in_suspend = false;
	return r;
}

static bool gfx_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v8_0_is_idle(handle))
			return 0;

		udelay(1);
	}
	return -ETIMEDOUT;
}

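/**
 * gfx_v8_0_check_soft_reset - determine which GFX blocks need a reset
 *
 * @handle: amdgpu_device pointer cast to void
 *
 * Inspects GRBM_STATUS, GRBM_STATUS2 and SRBM_STATUS and records the
 * corresponding soft-reset masks in adev->gfx. Returns true when any
 * block is busy and a soft reset is required.
 */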
static bool gfx_v8_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPF, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPC, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPG, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
						SOFT_RESET_GRBM, 1);
	}

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	if (grbm_soft_reset || srbm_soft_reset) {
		adev->gfx.grbm_soft_reset = grbm_soft_reset;
		adev->gfx.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gfx.grbm_soft_reset = 0;
		adev->gfx.srbm_soft_reset = 0;
		return false;
	}
}

static int gfx_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;
	srbm_soft_reset = adev->gfx.srbm_soft_reset;

	/* stop the rlc */
	gfx_v8_0_rlc_stop(adev);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		/* Disable GFX parsing/prefetching */
		gfx_v8_0_cp_gfx_enable(adev, false);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		/* Disable MEC parsing/prefetching */
		gfx_v8_0_cp_compute_enable(adev, false);
	}

	return 0;
}

static int gfx_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;
	srbm_soft_reset = adev->gfx.srbm_soft_reset;

	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
		WREG32(mmGMCON_DEBUG, tmp);
		udelay(50);
	}

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
		WREG32(mmGMCON_DEBUG, tmp);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	return 0;
}

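/**
 * gfx_v8_0_post_soft_reset - restart the engines after a soft reset
 *
 * @handle: amdgpu_device pointer cast to void
 *
 * Resumes the GFX CP and, if the compute blocks were reset, deactivates
 * any stale HQDs and brings the KIQ path back before restarting the RLC.
 */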
static int gfx_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;
	srbm_soft_reset = adev->gfx.srbm_soft_reset;

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		gfx_v8_0_cp_gfx_resume(adev);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		gfx_v8_0_kiq_resume(adev);
	}
	gfx_v8_0_rlc_start(adev);

	return 0;
}

/**
 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches a GPU clock counter snapshot.
 * Returns the 64 bit clock counter snapshot.
 */
static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}

static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
	gds_size = gds_size >> AMDGPU_GDS_SHIFT;

	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
	gws_size = gws_size >> AMDGPU_GWS_SHIFT;

	oa_base = oa_base >> AMDGPU_OA_SHIFT;
	oa_size = oa_size >> AMDGPU_OA_SHIFT;

	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

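/*
 * SQ indirect register access for wave debugging: SQ_IND_INDEX selects a
 * wave (and optionally a thread) plus a register index, and SQ_IND_DATA
 * returns the value; AUTO_INCR allows streaming consecutive registers.
 */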
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (address << SQ_IND_INDEX__INDEX__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32(mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (regno << SQ_IND_INDEX__INDEX__SHIFT) |
	       (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK) |
	       (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32(mmSQ_IND_DATA);
}

static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v8_0_select_se_sh,
	.read_wave_data = &gfx_v8_0_read_wave_data,
	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};

static int gfx_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
	gfx_v8_0_set_ring_funcs(adev);
	gfx_v8_0_set_irq_funcs(adev);
	gfx_v8_0_set_gds_init(adev);
	gfx_v8_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	/* requires IBs so do in late init after IB pool is initialized */
	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
	if (r)
		return r;

	amdgpu_device_ip_set_powergating_state(adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);

	return 0;
}

static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	if ((adev->asic_type == CHIP_POLARIS11) ||
	    (adev->asic_type == CHIP_POLARIS12))
		/* Send msg to SMU via Powerplay */
		amdgpu_device_ip_set_powergating_state(adev,
						AMD_IP_BLOCK_TYPE_SMC,
						enable ?
						AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);

	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}

static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
							 bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}

static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}

static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}

static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);

	/* Read any GFX register to wake up GFX. */
	if (!enable)
		RREG32(mmDB_RENDER_CONTROL);
}

static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		cz_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			cz_enable_gfx_pipeline_power_gating(adev, true);
	} else {
		cz_enable_gfx_cg_power_gating(adev, false);
		cz_enable_gfx_pipeline_power_gating(adev, false);
	}
}

static int gfx_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			cz_enable_sck_slow_down_on_power_up(adev, true);
			cz_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			cz_enable_sck_slow_down_on_power_up(adev, false);
			cz_enable_sck_slow_down_on_power_down(adev, false);
		}
		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			cz_enable_cp_power_gating(adev, true);
		else
			cz_enable_cp_power_gating(adev, false);

		cz_update_gfx_cg_power_gating(adev, enable);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
		else
			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_CGTS */
	data = RREG32(mmCGTS_SM_CTRL_REG);
	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS;

	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32(mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32(mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}

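/**
 * gfx_v8_0_send_serdes_cmd - issue a BPM command over the RLC serdes
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: BPM register address (one of the BPM_REG_* values)
 * @cmd: SET_BPM_SERDES_CMD or CLE_BPM_SERDES_CMD
 *
 * Broadcasts a set/clear command to the per-CU BPM registers on all
 * SEs/SHs; this is the path used below to toggle the CGCG/CGLS/MGCG
 * override bits.
 */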
static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
				     uint32_t reg_addr, uint32_t cmd)
{
	uint32_t data;

	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);

	data = RREG32(mmRLC_SERDES_WR_CTRL);
	if (adev->asic_type == CHIP_STONEY)
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	else
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));

	WREG32(mmRLC_SERDES_WR_CTRL, data);
}

#define MSG_ENTER_RLC_SAFE_MODE		1
#define MSG_EXIT_RLC_SAFE_MODE		0
#define RLC_GPR_REG2__REQ_MASK		0x00000001
#define RLC_GPR_REG2__REQ__SHIFT	0
#define RLC_GPR_REG2__MESSAGE__SHIFT	0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK	0x0000001e

static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 data;
	unsigned i;

	data = RREG32(mmRLC_CNTL);
	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		data |= RLC_SAFE_MODE__CMD_MASK;
		data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
		WREG32(mmRLC_SAFE_MODE, data);

		for (i = 0; i < adev->usec_timeout; i++) {
			if ((RREG32(mmRLC_GPM_STAT) &
			     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
			      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
			    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
			     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
				break;
			udelay(1);
		}

		for (i = 0; i < adev->usec_timeout; i++) {
			if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
				break;
			udelay(1);
		}
		adev->gfx.rlc.in_safe_mode = true;
	}
}

static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 data = 0;
	unsigned i;

	data = RREG32(mmRLC_CNTL);
	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->gfx.rlc.in_safe_mode) {
			data |= RLC_SAFE_MODE__CMD_MASK;
			data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
			WREG32(mmRLC_SAFE_MODE, data);
			adev->gfx.rlc.in_safe_mode = false;
		}
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
	.enter_safe_mode = iceland_enter_rlc_safe_mode,
	.exit_safe_mode = iceland_exit_rlc_safe_mode
};

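/*
 * Medium grain clock gating (MGCG) and light sleep (MGLS): on enable,
 * light sleep is turned on first and the MGCG overrides are then cleared
 * through the serdes; on disable the overrides are set again and light
 * sleep is switched off, with serdes idle waits between the steps.
 */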
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, data;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
				/* 1 - RLC memory Light sleep */
				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);

			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
		}

		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		if (adev->flags & AMD_IS_APU)
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
		else
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);

		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 5 - clear mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			/* 6 - Enable CGTS(Tree Shade) MGCG/MGLS */
			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (temp != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
		udelay(50);

		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	} else {
		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
			 CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
		if (temp != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 6 - set mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		udelay(50);

		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}

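/*
 * Coarse grain clock gating (CGCG/CGLS): on enable, the CGCG override is
 * cleared and CGLS enabled through the serdes before CGCG_EN is set; on
 * disable, the overrides are set again and both enable bits are cleared.
 * GUI idle interrupts are toggled around the sequence so the RLC can see
 * idle transitions.
 */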
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, temp1, data, data1;

	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 2 - clear cgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 3 - write cmd to set CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);

		/* 4 - enable cgcg */
		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			/* enable cgls */
			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

			temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;

			if (temp1 != data1)
				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
		} else {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		/* 5 - enable cntx_empty_int_enable/cntx_busy_int_enable/
		 * Cmp_busy/GFX_Idle interrupts
		 */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	} else {
		/* disable cntx_empty_int_enable & GFX Idle interrupt */
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

		/* TEST CGCG */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* read gfx register to wake up cgcg */
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to set CGCG override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to clear CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);

		/* disable cgcg; cgls should be disabled too */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
		/* enable interrupts again for PG */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	}

	gfx_v8_0_wait_for_rlc_serdes(adev);

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
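
/*
 * Ordering matters here: coarse grain gating is enabled after, and
 * disabled before, the medium grain path, per the comments below.
 */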
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
		 * === MGCG + MGLS + TS(CG/LS) ===
		 */
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
		 * === CGCG + CGLS ===
		 */
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}

static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_CG,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_MG,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}

static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
						    enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_CG,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_3D,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_MG,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
		pp_support_state = PP_STATE_SUPPORT_LS;

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_RLC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
		pp_support_state = PP_STATE_SUPPORT_LS;

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;
		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				      PP_BLOCK_GFX_CP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}

static int gfx_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		gfx_v8_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		/* XXX check if swapping is necessary on BE */
		return ring->adev->wb.wb[ring->wptr_offs];
	else
		return RREG32(mmCP_RB0_WPTR);
}

static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		(void)RREG32(mmCP_RB0_WPTR);
	}
}

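/**
 * gfx_v8_0_ring_emit_hdp_flush - flush the HDP cache from a ring
 *
 * @ring: ring to emit on
 *
 * Emits a WAIT_REG_MEM write-then-poll on GPU_HDP_FLUSH_REQ/DONE with a
 * per-CP reference mask so each ME/pipe waits on its own completion bit.
 */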
6195static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
6196{
6197 u32 ref_and_mask, reg_mem_engine;
6198
6199 if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
6200 (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
6201 switch (ring->me) {
6202 case 1:
6203 ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
6204 break;
6205 case 2:
6206 ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
6207 break;
6208 default:
6209 return;
6210 }
6211 reg_mem_engine = 0;
6212 } else {
6213 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
6214 reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
6215 }
6216
6217 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6218 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
6219 WAIT_REG_MEM_FUNCTION(3) | /* == */
6220 reg_mem_engine));
6221 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
6222 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
6223 amdgpu_ring_write(ring, ref_and_mask);
6224 amdgpu_ring_write(ring, ref_and_mask);
6225 amdgpu_ring_write(ring, 0x20); /* poll interval */
6226}
6227
6228static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
6229{
6230 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6231 amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
6232 EVENT_INDEX(4));
6233
6234 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6235 amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
6236 EVENT_INDEX(0));
6237}
6238
6239static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
6240 struct amdgpu_ib *ib,
6241 unsigned vmid, bool ctx_switch)
6242{
6243 u32 header, control = 0;
6244
6245 if (ib->flags & AMDGPU_IB_FLAG_CE)
6246 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
6247 else
6248 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
6249
6250 control |= ib->length_dw | (vmid << 24);
6251
6252 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
6253 control |= INDIRECT_BUFFER_PRE_ENB(1);
6254
6255 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
6256 gfx_v8_0_ring_emit_de_meta(ring);
6257 }
6258
6259 amdgpu_ring_write(ring, header);
6260 amdgpu_ring_write(ring,
6261#ifdef __BIG_ENDIAN
6262 (2 << 0) |
6263#endif
6264 (ib->gpu_addr & 0xFFFFFFFC));
6265 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6266 amdgpu_ring_write(ring, control);
6267}
6268
6269static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
6270 struct amdgpu_ib *ib,
6271 unsigned vmid, bool ctx_switch)
6272{
6273 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
6274
6275 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
6276 amdgpu_ring_write(ring,
6277#ifdef __BIG_ENDIAN
6278 (2 << 0) |
6279#endif
6280 (ib->gpu_addr & 0xFFFFFFFC));
6281 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6282 amdgpu_ring_write(ring, control);
6283}
6284
6285static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
6286 u64 seq, unsigned flags)
6287{
6288 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6289 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6290
6291 /* EVENT_WRITE_EOP - flush caches, send int */
6292 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6293 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6294 EOP_TC_ACTION_EN |
6295 EOP_TC_WB_ACTION_EN |
6296 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6297 EVENT_INDEX(5)));
6298 amdgpu_ring_write(ring, addr & 0xfffffffc);
6299 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6300 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6301 amdgpu_ring_write(ring, lower_32_bits(seq));
6302 amdgpu_ring_write(ring, upper_32_bits(seq));
6303
6304}
6305
6306static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
6307{
6308 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6309 uint32_t seq = ring->fence_drv.sync_seq;
6310 uint64_t addr = ring->fence_drv.gpu_addr;
6311
6312 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6313 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
6314 WAIT_REG_MEM_FUNCTION(3) | /* equal */
6315 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
6316 amdgpu_ring_write(ring, addr & 0xfffffffc);
6317 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
6318 amdgpu_ring_write(ring, seq);
6319 amdgpu_ring_write(ring, 0xffffffff);
6320 amdgpu_ring_write(ring, 4); /* poll interval */
6321}
6322
6323static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
6324 unsigned vmid, uint64_t pd_addr)
6325{
6326 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6327
6328 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
6329
6330 /* wait for the invalidate to complete */
6331 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6332 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
6333 WAIT_REG_MEM_FUNCTION(0) | /* always */
6334 WAIT_REG_MEM_ENGINE(0))); /* me */
6335 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
6336 amdgpu_ring_write(ring, 0);
6337 amdgpu_ring_write(ring, 0); /* ref */
6338 amdgpu_ring_write(ring, 0); /* mask */
6339 amdgpu_ring_write(ring, 0x20); /* poll interval */
6340
6341 /* compute doesn't have PFP */
6342 if (usepfp) {
6343 /* sync PFP to ME, otherwise we might get invalid PFP reads */
6344 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
6345 amdgpu_ring_write(ring, 0x0);
6346 }
6347}
6348
6349static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
6350{
6351 return ring->adev->wb.wb[ring->wptr_offs];
6352}
6353
6354static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6355{
6356 struct amdgpu_device *adev = ring->adev;
6357
6358 /* XXX check if swapping is necessary on BE */
6359 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6360 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6361}
6362
6363static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
6364 bool acquire)
6365{
6366 struct amdgpu_device *adev = ring->adev;
6367 int pipe_num, tmp, reg;
6368 int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
6369
6370 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
6371
6372 /* first me only has 2 entries, GFX and HP3D */
6373 if (ring->me > 0)
6374 pipe_num -= 2;
6375
6376 reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
6377 tmp = RREG32(reg);
6378 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
6379 WREG32(reg, tmp);
6380}
6381
6382static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
6383 struct amdgpu_ring *ring,
6384 bool acquire)
6385{
6386 int i, pipe;
6387 bool reserve;
6388 struct amdgpu_ring *iring;
6389
6390 mutex_lock(&adev->gfx.pipe_reserve_mutex);
6391 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
6392 if (acquire)
6393 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6394 else
6395 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6396
6397 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
6398 /* Clear all reservations - everyone reacquires all resources */
6399 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
6400 gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
6401 true);
6402
6403 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
6404 gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
6405 true);
6406 } else {
6407 /* Lower all pipes without a current reservation */
6408 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
6409 iring = &adev->gfx.gfx_ring[i];
6410 pipe = amdgpu_gfx_queue_to_bit(adev,
6411 iring->me,
6412 iring->pipe,
6413 0);
6414 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6415 gfx_v8_0_ring_set_pipe_percent(iring, reserve);
6416 }
6417
6418 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
6419 iring = &adev->gfx.compute_ring[i];
6420 pipe = amdgpu_gfx_queue_to_bit(adev,
6421 iring->me,
6422 iring->pipe,
6423 0);
6424 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6425 gfx_v8_0_ring_set_pipe_percent(iring, reserve);
6426 }
6427 }
6428
6429 mutex_unlock(&adev->gfx.pipe_reserve_mutex);
6430}
6431
6432static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
6433 struct amdgpu_ring *ring,
6434 bool acquire)
6435{
6436 uint32_t pipe_priority = acquire ? 0x2 : 0x0;
6437 uint32_t queue_priority = acquire ? 0xf : 0x0;
6438
6439 mutex_lock(&adev->srbm_mutex);
6440 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6441
6442 WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
6443 WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
6444
6445 vi_srbm_select(adev, 0, 0, 0, 0);
6446 mutex_unlock(&adev->srbm_mutex);
6447}
6448static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
6449 enum drm_sched_priority priority)
6450{
6451 struct amdgpu_device *adev = ring->adev;
6452 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
6453
6454 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
6455 return;
6456
6457 gfx_v8_0_hqd_set_priority(adev, ring, acquire);
6458 gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
6459}
6460
6461static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
6462 u64 addr, u64 seq,
6463 unsigned flags)
6464{
6465 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6466 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6467
6468 /* RELEASE_MEM - flush caches, send int */
6469 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
6470 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6471 EOP_TC_ACTION_EN |
6472 EOP_TC_WB_ACTION_EN |
6473 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6474 EVENT_INDEX(5)));
6475 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6476 amdgpu_ring_write(ring, addr & 0xfffffffc);
6477 amdgpu_ring_write(ring, upper_32_bits(addr));
6478 amdgpu_ring_write(ring, lower_32_bits(seq));
6479 amdgpu_ring_write(ring, upper_32_bits(seq));
6480}
6481
6482static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
6483 u64 seq, unsigned int flags)
6484{
6485 /* we only allocate 32bit for each seq wb address */
6486 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
6487
6488 /* write fence seq to the "addr" */
6489 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6490 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6491 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6492 amdgpu_ring_write(ring, lower_32_bits(addr));
6493 amdgpu_ring_write(ring, upper_32_bits(addr));
6494 amdgpu_ring_write(ring, lower_32_bits(seq));
6495
6496 if (flags & AMDGPU_FENCE_FLAG_INT) {
6497 /* set register to trigger INT */
6498 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6499 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6500 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6501 amdgpu_ring_write(ring, mmCPC_INT_STATUS);
6502 amdgpu_ring_write(ring, 0);
6503 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6504 }
6505}
6506
static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

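/*
 * Emit a CONTEXT_CONTROL packet. The dw2 bits below select which state
 * blocks the CP (re)loads on a context switch; the magic constants are
 * the packet's load-enable bit fields, as named in the comments. On
 * SR-IOV the CE metadata has to be emitted first so the CSA snapshot
 * is coherent before the switch.
 */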
static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v8_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		gfx_v8_0_ring_emit_vgt_flush(ring);
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if a preamble is presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still set load_ce_ram the first time a preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

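/*
 * COND_EXEC support: init_cond_exec emits a COND_EXEC packet whose
 * skip-count DW is a placeholder, and returns the ring offset of that
 * DW so patch_cond_exec can fill in the real number of DWs to skip
 * once the end of the conditional region is known. The wrap-around
 * case is handled by adding the ring size (in DWs) to the distance.
 */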
static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* placeholder, patched later */
	return ret;
}

static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

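/*
 * Read a register from the ring (used by the KIQ on SR-IOV): COPY_DATA
 * from the register (src sel 0) to memory (dst sel 5) with write
 * confirmation, landing the value in the writeback page at
 * adev->virt.reg_val_offs so the CPU-side reader can pick it up.
 */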
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

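/*
 * Write a register from the ring via WRITE_DATA. The command bits
 * depend on the ring type: GFX selects engine_sel 1 (the PFP) with
 * write confirm, KIQ sets only the no-increment-address bit (1 << 16),
 * and other rings just request write confirmation.
 */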
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = 1 << 16; /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}

static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		case 1:
			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
			break;
		case 2:
			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
			break;
		case 3:
			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);

	return 0;
}

static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);

	return 0;
}

static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

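/*
 * EOP interrupt handler. The IV ring_id encodes the source queue as
 * bitfields, per the masks below: [1:0] = pipe, [3:2] = me,
 * [6:4] = queue. me 0 is the GFX ring; me 1/2 are the compute MECs,
 * matched against each compute ring before its fences are processed.
 */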
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupts are delivered for the MEC
			 * starting from VI, but they can only be
			 * enabled/disabled per pipe, not per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

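/*
 * Enable/disable the GENERIC2 interrupt that KIQ fences rely on. It
 * has to be unmasked both globally in CPC_INT_CNTL and in the per-pipe
 * CP_MEx_PIPEy_INT_CNTL register matching the KIQ ring's me/pipe.
 */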
static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
			     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
		if (ring->me == 1)
			WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
					    ring->pipe,
					    GENERIC2_INT_ENABLE,
					    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
		else
			WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
					    ring->pipe,
					    GENERIC2_INT_ENABLE,
					    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
		break;
	default:
		BUG(); /* KIQ only supports GENERIC2_INT for now */
		break;
	}
	return 0;
}

static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}

static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
	.name = "gfx_v8_0",
	.early_init = gfx_v8_0_early_init,
	.late_init = gfx_v8_0_late_init,
	.sw_init = gfx_v8_0_sw_init,
	.sw_fini = gfx_v8_0_sw_fini,
	.hw_init = gfx_v8_0_hw_init,
	.hw_fini = gfx_v8_0_hw_fini,
	.suspend = gfx_v8_0_suspend,
	.resume = gfx_v8_0_resume,
	.is_idle = gfx_v8_0_is_idle,
	.wait_for_idle = gfx_v8_0_wait_for_idle,
	.check_soft_reset = gfx_v8_0_check_soft_reset,
	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
	.soft_reset = gfx_v8_0_soft_reset,
	.post_soft_reset = gfx_v8_0_post_soft_reset,
	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
	.set_powergating_state = gfx_v8_0_set_powergating_state,
	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
};

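/*
 * In the ring funcs below, emit_frame_size is the worst-case number of
 * DWs a single submission may emit outside of its IBs; the ring code
 * reserves this much up front so none of the emit_* callbacks can run
 * out of ring space mid-frame. The per-item counts in the comments are
 * the DW footprints of the corresponding packets.
 */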
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
	.emit_frame_size = /* maximum 215 DWs if 16 IBs are counted in */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		       the first COND_EXEC jumps to the place just
		       prior to this double SWITCH_BUFFER */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v8_ring_emit_sb,
	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v8_0_ring_set_priority_compute,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		17 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v8_0_ring_emit_rreg,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
	.set = gfx_v8_0_set_eop_interrupt_state,
	.process = gfx_v8_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
	.set = gfx_v8_0_set_priv_reg_fault_state,
	.process = gfx_v8_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
	.set = gfx_v8_0_set_priv_inst_fault_state,
	.process = gfx_v8_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
	.set = gfx_v8_0_kiq_set_interrupt_state,
	.process = gfx_v8_0_kiq_irq,
};

static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
}

static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}

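/*
 * Partition the on-chip GDS memory, GWS and OA resources between the
 * gfx and compute (CS) clients. The split depends on the GDS size
 * reported by mmGDS_VMID0_SIZE: 64 KB parts get 4 KB memory
 * partitions, smaller configurations fall back to 1 KB partitions.
 */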
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC GDS info */
	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}

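/*
 * Walk every shader engine / shader array under grbm_idx_mutex, apply
 * any user-requested CU disable masks, and record which CUs are
 * active. The first ao_cu_num active CUs in each SH are marked
 * always-on: APUs keep 2 per SH, dGPUs keep them all.
 */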
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	memset(cu_info, 0, sizeof(*cu_info));

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v8_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}

const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};

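/*
 * SR-IOV world-switch metadata: snapshots of the CE and DE payloads
 * are kept in the context save area (CSA) so the host can preempt and
 * restore the ring. Each helper computes the payload address inside
 * the CSA (the chained-IB layout differs from the regular one) and
 * emits the whole payload through a single WRITE_DATA packet. The
 * "(sizeof(...) >> 2) + 4 - 2" count follows the PACKET3 DW
 * convention: payload DWs plus the four header/control/address DWs,
 * minus the standard bias of two.
 */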
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	uint64_t ce_payload_addr;
	int cnt_ce;
	union {
		struct vi_ce_ib_state regular;
		struct vi_ce_ib_state_chained_ib chained;
	} ce_payload = {};

	if (ring->adev->virt.chained_ib_support) {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
	} else {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data, ce_payload);
		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
}

static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	uint64_t de_payload_addr, gds_addr, csa_addr;
	int cnt_de;
	union {
		struct vi_de_ib_state regular;
		struct vi_de_ib_state_chained_ib chained;
	} de_payload = {};

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	if (ring->adev->virt.chained_ib_support) {
		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
	} else {
		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}