/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v2_3.h"

#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
#include <linux/pci.h>

#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_LC_CNTL 0x11140280
#define smnPCIE_LC_CNTL3 0x111402d4
#define smnPCIE_LC_CNTL6 0x111402ec
#define smnPCIE_LC_CNTL7 0x111402f0
#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c

#define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6
#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2
#define mmBIF_SDMA3_DOORBELL_RANGE 0x01d7
#define mmBIF_SDMA3_DOORBELL_RANGE_BASE_IDX 2

#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2

#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288

#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L

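/*
 * Mirror the HDP MEM/REG flush controls into the remapped MMIO window
 * (rmmio_remap) at the fixed offsets defined by the KFD MMIO remap interface.
 */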
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp;

	/*
	 * guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
	 * therefore we force rev_id to 0 (which is the default value)
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}

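/*
 * Program the BIF doorbell aperture (offset and size) for one SDMA instance.
 * All BIF_SDMAx_DOORBELL_RANGE registers share the same field layout, so the
 * BIF_SDMA0 field definitions are reused for every instance.
 */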
static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index,
					  int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
		instance == 1 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE) :
		instance == 2 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA2_DOORBELL_RANGE) :
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA3_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, SIZE,
					       doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, SIZE,
					       0);

	WREG32(reg, doorbell_range);
}

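/*
 * Program the MMSCH doorbell range for a VCN instance: MMSCH0 for instance 0,
 * MMSCH1 for any other instance. A fixed SIZE of 8 is used when enabling.
 */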
static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg = instance ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE) :
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v2_3_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN,
		       enable ? 1 : 0);
}

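/*
 * Enable or disable the doorbell self-ring GPA aperture; when enabling, the
 * aperture base is pointed at the device's doorbell base address.
 */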
static void nbio_v2_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							 bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
		     tmp);
}

static void nbio_v2_3_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, OFFSET,
						  doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE,
						  2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE,
						  0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

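/*
 * Basic BIF-side interrupt controller setup: program the dummy page address
 * for dummy reads and leave the dummy-read override and non-snooped IH
 * requests disabled.
 */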
static void nbio_v2_3_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/*
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

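/*
 * Toggle BIF medium grain clock gating by setting or clearing the clock gate
 * enables in CPM_CONTROL. No-op unless AMD_CG_SUPPORT_BIF_MGCG is set in
 * adev->cg_flags.
 */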
static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		return;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}

static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		return;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

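/* Report the BIF clockgating features that are currently active in hardware. */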
static void nbio_v2_3_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static u32 nbio_v2_3_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v2_3_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v2_3_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v2_3_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

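/*
 * Per-engine reference/mask bits for the BIF_BX_PF_GPU_HDP_FLUSH_REQ/DONE
 * handshake used when engines request an HDP flush.
 */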
const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

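/*
 * One-time NBIO register init: force the CI SWUS max read request size mode
 * and, under SR-IOV, point rmmio_remap at the VF copy of the HDP memory
 * coherency flush control register.
 */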
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);

	if (amdgpu_sriov_vf(adev))
		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
			mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms
#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms

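/*
 * Enable or disable ASPM by programming the L0s/L1 inactivity timers in
 * PCIE_LC_CNTL; a longer L1 inactivity timer is used when the device is
 * Thunderbolt-attached.
 */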
static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
				  bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);

	if (enable) {
		/* Disable ASPM L0s/L1 first */
		data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);

		data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;

		if (pci_is_thunderbolt_attached(adev->pdev))
			data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
		else
			data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;

		data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	} else {
		/* Disable ASPM L1 */
		data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
		/* Disable ASPM TxL0s */
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		/* Disable ACPI L1 */
		data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);
}

#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
	uint32_t def, data;

	WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
	data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);

	def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
	data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
#endif

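/*
 * Driver-side ASPM/LTR programming sequence. Compiled out (no-op) when
 * CONFIG_PCIEASPM is not set.
 */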
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL7, data);

	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
	data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
	data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);

	WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);

	def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
		PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL6, data);

	/* Don't bother programming LTR if it is not enabled along the path */
	if (adev->pdev->ltr_path)
		nbio_v2_3_program_ltr(adev);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
	data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
	data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
#endif
}

static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
{
	uint32_t reg_data = 0;
	uint32_t link_width = 0;

	if (!((adev->asic_type >= CHIP_NAVI10) &&
	      (adev->asic_type <= CHIP_NAVI12)))
		return;

	reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
	link_width = (reg_data & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;

	/*
	 * Program PCIE_LC_CNTL6.LC_SPC_MODE_8GT to 0x2 (4 symbols per clock data)
	 * if link_width is 0x3 (x4)
	 */
	if (0x3 == link_width) {
		reg_data = RREG32_PCIE(smnPCIE_LC_CNTL6);
		reg_data &= ~PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK;
		reg_data |= (0x2 << PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT);
		WREG32_PCIE(smnPCIE_LC_CNTL6, reg_data);
	}
}

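/* Navi10 workaround: enable L1 link width reconfiguration (LC_L1_RECONFIG_EN). */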
static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev)
{
	uint32_t reg_data = 0;

	if (adev->asic_type != CHIP_NAVI10)
		return;

	reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
	reg_data |= PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK;
	WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
}

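/*
 * Clear a stale doorbell interrupt status while the BIF ring buffer is
 * disabled. Only applies to NBIO 3.3.0.
 */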
static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev)
{
	uint32_t reg, reg_data;

	if (adev->ip_versions[NBIO_HWIP][0] != IP_VERSION(3, 3, 0))
		return;

	reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL);

	/* Clear Interrupt Status */
	if ((reg & BIF_RB_CNTL__RB_ENABLE_MASK) == 0) {
		reg = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
		if (reg & BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK) {
			reg_data = 1 << BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT;
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, reg_data);
		}
	}
}

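/* NBIO v2.3 callbacks exported for the ASICs that use this NBIO version. */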
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
	.get_rev_id = nbio_v2_3_get_rev_id,
	.mc_access_enable = nbio_v2_3_mc_access_enable,
	.get_memsize = nbio_v2_3_get_memsize,
	.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v2_3_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v2_3_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v2_3_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v2_3_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v2_3_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v2_3_get_clockgating_state,
	.ih_control = nbio_v2_3_ih_control,
	.init_registers = nbio_v2_3_init_registers,
	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
	.enable_aspm = nbio_v2_3_enable_aspm,
	.program_aspm = nbio_v2_3_program_aspm,
	.apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
	.apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
	.clear_doorbell_interrupt = nbio_v2_3_clear_doorbell_interrupt,
};