/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnPCIE_LC_CNTL		0x11140280
#define smnPCIE_LC_CNTL3	0x111402d4
#define smnPCIE_LC_CNTL6	0x111402ec
#define smnPCIE_LC_CNTL7	0x111402f0
#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c
#define smnRCC_BIF_STRAP3	0x1012348c
#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK	0x0000FFFFL
#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK	0xFFFF0000L
#define smnRCC_BIF_STRAP5	0x10123494
#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK	0x0000FFFFL
#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2	0x1014008c
#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK	0x0400L
#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP	0x10140324
#define smnPSWUSP0_PCIE_LC_CNTL2		0x111402c4
#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL	0x10123538
#define smnRCC_BIF_STRAP2	0x10123488
#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK	0x00004000L
#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT	0x0
#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT	0x10
#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT	0x0

/*
 * These are nbio v7_4_1 register masks. Define them here temporarily
 * since the nbio v7_4_1 header is incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L /* Don't use; firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK	0x00040000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK	0x00080000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK	0x00100000L

#define mmBIF_MMSCH1_DOORBELL_RANGE			0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX		2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT		0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK		0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK		0x001F0000L

#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE		0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE_BASE_IDX	2
//BIF_MMSCH1_DOORBELL_RANGE_ALDE
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE__SHIFT	0x10
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET_MASK	0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE_MASK	0x001F0000L

#define mmRCC_DEV0_EPF0_STRAP0_ALDE			0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX		2

#define mmBIF_DOORBELL_INT_CNTL_ALDE			0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX		2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT	0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK	0x01000000L

#define mmBIF_INTR_CNTL_ALDE				0x0101
#define mmBIF_INTR_CNTL_ALDE_BASE_IDX			2

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status);
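/*
 * Remap the HDP MEM/REG flush control registers into the MMIO hole
 * reserved by nbio_v7_4_set_reg_remap() so that user space (e.g. the
 * KFD, via the KFD_MMIO_REMAP_* offsets) can trigger HDP flushes
 * through a mapped page.
 */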
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
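/* The ATI revision ID is sampled from a different strap register on Aldebaran. */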
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->asic_type == CHIP_ALDEBARAN)
		tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_ALDE);
	else
		tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
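/*
 * Program the doorbell range decoded for one SDMA instance: the OFFSET
 * and SIZE fields of the matching BIF_SDMAn_DOORBELL_RANGE register.
 * A SIZE of 0 disables the range.
 */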
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2) {
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	} else {
		/*
		 * The register addresses of SDMA2~7 are not consecutive
		 * with SDMA0~1, so an extra 4-dword offset is needed.
		 *
		 * BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
		 * BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
		 * BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
		 * BIF_SDMA4_DOORBELL_RANGE:
		 *     ARCTURUS:  0x3be0
		 *     ALDEBARAN: 0x3be4
		 */
		if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
			reg = instance + 0x4 + 0x1 +
				SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_SDMA0_DOORBELL_RANGE);
		else
			reg = instance + 0x4 +
				SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_SDMA0_DOORBELL_RANGE);
	}

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
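/*
 * Program the MMSCH doorbell range for a VCN instance. Instance 1 uses
 * BIF_MMSCH1_DOORBELL_RANGE, which sits at a different offset on
 * Aldebaran; instance 0 always uses BIF_MMSCH0_DOORBELL_RANGE.
 */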
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg;
	u32 doorbell_range;

	if (instance) {
		if (adev->asic_type == CHIP_ALDEBARAN)
			reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE_ALDE);
		else
			reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	} else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
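/*
 * Enable the self-ring doorbell aperture so doorbell writes issued by
 * the GPU itself are routed back through the doorbell BAR; the aperture
 * base is taken from adev->doorbell.base.
 */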
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 8);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	//TODO: Add support for v7.4
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}
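/*
 * Set up interrupt control: point INTERRUPT_CNTL2 at the dummy page
 * used for the IH dummy read, and leave the dummy-read override and
 * non-snoop request bits cleared, since the IH ring lives in snooped
 * system memory here.
 */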
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
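/*
 * Per-engine reference/mask pairs for the GPU_HDP_FLUSH_REQ/DONE
 * handshake: a ring writes its mask to the request register and then
 * polls its own done bit.
 */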
const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
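/*
 * One-off register fixups at init time: on NBIO 7.4.4 bare metal,
 * clear any BACO/BACO-dummy enable bits that are still set.
 */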
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
	uint32_t baco_cntl;

	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4) &&
	    !amdgpu_sriov_vf(adev)) {
		baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);
		if (baco_cntl &
		    (BACO_CNTL__BACO_DUMMY_EN_MASK | BACO_CNTL__BACO_EN_MASK)) {
			baco_cntl &= ~(BACO_CNTL__BACO_DUMMY_EN_MASK |
				       BACO_CNTL__BACO_EN_MASK);
			dev_dbg(adev->dev, "Unsetting baco dummy mode %x",
				baco_cntl);
			WREG32_SOC15(NBIO, 0, mmBACO_CNTL, baco_cntl);
		}
	}
}
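/*
 * Handle the RAS controller interrupt without the bif ring: ack the
 * doorbell interrupt status, harvest the error counters unless
 * harvesting is disabled, then invoke the global RAS ISR.
 */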
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
	struct ras_err_data err_data;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (amdgpu_ras_error_data_init(&err_data))
		return;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
	else
		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
			  BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when the bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						       BIF_DOORBELL_INT_CNTL,
						       RAS_CNTLR_INTERRUPT_CLEAR, 1);
		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
			/*
			 * clear error status after ras_controller_intr
			 * according to hw team and count ue number
			 * for query
			 */
			nbio_v7_4_query_ras_error_count(adev, &err_data);

			/* log the error counts and print for awareness */
			obj->err_data.ue_count += err_data.ue_count;
			obj->err_data.ce_count += err_data.ce_count;

			if (err_data.ce_count)
				dev_info(adev->dev, "%ld correctable hardware "
					 "errors detected in %s block\n",
					 obj->err_data.ce_count,
					 get_ras_block_str(adev->nbio.ras_if));

			if (err_data.ue_count)
				dev_info(adev->dev, "%ld uncorrectable hardware "
					 "errors detected in %s block\n",
					 obj->err_data.ue_count,
					 get_ras_block_str(adev->nbio.ras_if));
		}

		dev_info(adev->dev, "RAS controller interrupt triggered "
			 "by NBIF error\n");

		/* ras_controller_int is dedicated to nbif ras errors,
		 * not the global interrupt for sync flood
		 */
		amdgpu_ras_global_ras_isr(adev);
	}

	amdgpu_ras_error_data_fini(&err_data);
}

static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
	else
		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
			  BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when the bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						       BIF_DOORBELL_INT_CNTL,
						       RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);

		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}

static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in the psp bl when
	 * it tries to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
	uint32_t bif_intr_cntl;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
	else
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);

		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. There is no chance
	 * the process function will be invoked, so just leave it as a dummy
	 * one.
	 */
	return 0;
}

static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* The ras_err_event_athub_irq enablement should be done in the psp bl
	 * when it tries to enable the ras feature. The driver only needs to
	 * set the correct interrupt vector for the bare-metal and sriov use
	 * cases respectively.
	 */
	uint32_t bif_intr_cntl;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
	else
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);

		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. There is no chance
	 * the process function will be invoked, so just leave it as a dummy
	 * one.
	 */
	return 0;
}

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};

static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_controller_irq.funcs =
		&nbio_v7_4_ras_controller_irq_funcs;
	adev->nbio.ras_controller_irq.num_types = 1;

	/* register the ras controller interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
			      &adev->nbio.ras_controller_irq);

	return r;
}

static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbio_v7_4_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register the ras err event athub interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030
#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE	0x13b20030
#define smnRAS_GLOBAL_STATUS_LO_ALDE		0x13b20020
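/*
 * Count parity errors reported in RAS_GLOBAL_STATUS_LO (Aldebaran uses
 * a different SMN address), then clear the status registers and EOI the
 * IOHC interrupt if the RAS controller interrupt was latched.
 */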
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi, parity_sts;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	if (adev->asic_type == CHIP_ALDEBARAN)
		global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE);
	else
		global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);

	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				  ParityErrNonFatal);

	if (adev->asic_type == CHIP_ALDEBARAN)
		parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE);
	else
		parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);

		/* clear error status register */
		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts);
		else
			WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (fatal) {
			/* clear parity fatal error indication field */
			if (adev->asic_type == CHIP_ALDEBARAN)
				WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts);
			else
				WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);
		}

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				  BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
						IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}

static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	if (adev->asic_type == CHIP_ALDEBARAN)
		WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
			       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
	else
		WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
			       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

const struct amdgpu_ras_block_hw_ops nbio_v7_4_ras_hw_ops = {
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
};

struct amdgpu_nbio_ras nbio_v7_4_ras = {
	.ras_block = {
		.ras_comm = {
			.name = "pcie_bif",
			.block = AMDGPU_RAS_BLOCK__PCIE_BIF,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		},
		.hw_ops = &nbio_v7_4_ras_hw_ops,
		.ras_late_init = amdgpu_nbio_ras_late_init,
	},
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
};

#ifdef CONFIG_PCIEASPM
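/*
 * Enable PCIe Latency Tolerance Reporting: program the EP LTR control
 * value, allow LTR messages in ASPM L1, and set the LTR enable bit in
 * DEVICE_CNTL2. Called only when the upstream path advertises LTR.
 */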
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
	uint32_t def, data;

	WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
	data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP2, data);

	def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
	data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
#endif
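/*
 * Program the ASPM-related link control and strap registers (L0s/L1
 * inactivity timers, LCLK clock gating, vlink entry timers, LTR).
 * Compiled out without CONFIG_PCIEASPM and skipped on NBIO 7.4.4.
 */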
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	uint32_t def, data;

	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4))
		return;

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL7, data);

	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
	data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP3, data);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
	data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);

	WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);

	def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
		PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL6, data);

	/* Don't bother with LTR if LTR is not enabled
	 * in the path */
	if (adev->pdev->ltr_path)
		nbio_v7_4_program_ltr(adev);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
	data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP3, data);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
	data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
#endif
}

#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
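/*
 * Choose where the HDP flush registers are remapped: bare metal with a
 * page size of 4K or less uses the last page of the MMIO BAR as the
 * remap hole; SR-IOV or larger page sizes fall back to the VF copy of
 * HDP_MEM_COHERENCY_FLUSH_CNTL with no bus address exposed.
 */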
static void nbio_v7_4_set_reg_remap(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	} else {
		adev->rmmio_remap.reg_offset =
			SOC15_REG_OFFSET(NBIO, 0,
					 mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
		adev->rmmio_remap.bus_addr = 0;
	}
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.program_aspm = nbio_v7_4_program_aspm,
	.set_reg_remap = nbio_v7_4_set_reg_remap,
};