/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};

/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};

/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};

/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
	.codec_array = sriov_sc_video_codecs_encode_array,
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
	.codec_array = sriov_sc_video_codecs_decode_array,
};

/* Beige Goby */
static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs bg_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
	.codec_array = bg_video_codecs_decode_array,
};

static const struct amdgpu_video_codecs bg_video_codecs_encode = {
	.codec_count = 0,
	.codec_array = NULL,
};

/* Yellow Carp */
static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs yc_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
	.codec_array = yc_video_codecs_decode_array,
};

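/*
 * Report the decode/encode codec capabilities for the given ASIC;
 * under SR-IOV the Sienna Cichlid tables are controlled by the host.
 */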
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		if (amdgpu_sriov_vf(adev)) {
			if (encode)
				*codecs = &sriov_sc_video_codecs_encode;
			else
				*codecs = &sriov_sc_video_codecs_decode;
		} else {
			if (encode)
				*codecs = &nv_video_codecs_encode;
			else
				*codecs = &sc_video_codecs_decode;
		}
		return 0;
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &sc_video_codecs_decode;
		return 0;
	case CHIP_YELLOW_CARP:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &yc_video_codecs_decode;
		return 0;
	case CHIP_BEIGE_GOBY:
		if (encode)
			*codecs = &bg_video_codecs_encode;
		else
			*codecs = &bg_video_codecs_decode;
		return 0;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &nv_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect registers accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

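/*
 * Route subsequent GRBM register accesses to the given ME/pipe/queue/
 * VMID by programming GRBM_GFX_CNTL.
 */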
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

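/*
 * Read the VBIOS image from the dedicated ROM through the SMUIO
 * index/data registers. Only valid on dGPUs; an APU's VBIOS lives
 * inside the system BIOS image.
 */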
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	u32 rom_index_offset, rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

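/*
 * Back the AMDGPU_INFO_READ_MMR_REG query from user space: only
 * registers on the allow-list above may be read; anything else
 * returns -EINVAL.
 */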
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		/* index 7 is mmSDMA1_STATUS_REG; some ASICs don't have SDMA1 */
		if ((i == 7 && (adev->sdma.num_instances == 1)) ||
		    reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

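/*
 * Mode2 reset goes through the SMU: bus mastering is stopped and the
 * PCI config space cached around the reset, then the memsize register
 * is polled until the ASIC comes back.
 */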
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

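/*
 * Choose the reset mechanism: honor a supported amdgpu_reset_method
 * module parameter, otherwise select per ASIC (mode2 for the APUs,
 * mode1 for Sienna Cichlid and friends, BACO where supported).
 */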
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		return AMD_RESET_METHOD_MODE1;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_aspm)
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

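/*
 * Headless Navi1x SKUs, identified by PCI device ID and revision, have
 * no usable VCN/JPEG engines; the caller harvests those IP blocks.
 */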
static bool nv_is_headless_sku(struct pci_dev *pdev)
{
	if ((pdev->device == 0x731E &&
	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
	    (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
		return true;
	return false;
}

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, fallback to legacy init method\n");
			goto legacy_init;
		}

		amdgpu_discovery_harvest_ip(adev);
		if (nv_is_headless_sku(adev->pdev)) {
			adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
			adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	case CHIP_VANGOGH:
		vangogh_reg_base_init(adev);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dimgrey_cavefish_reg_base_init(adev);
		break;
	case CHIP_BEIGE_GOBY:
		beige_goby_reg_base_init(adev);
		break;
	case CHIP_YELLOW_CARP:
		yellow_carp_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

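/*
 * Select the NBIO/HDP/SMUIO callbacks and register the per-ASIC IP
 * blocks (common, GMC, IH, PSP, SMU, display, GFX, SDMA, VCN/JPEG,
 * MES) in their initialization order.
 */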
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v5_0_funcs;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
	else
		adev->smuio.funcs = &smuio_v11_0_funcs;

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		}
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		} else {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		}
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case CHIP_VANGOGH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_BEIGE_GOBY:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		break;
	case CHIP_YELLOW_CARP:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

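/*
 * Program the Navi1x doorbell layout: KIQ, MEC and GFX rings, the MES
 * ring, SDMA engines, IH and VCN. Indices here are in 64-bit doorbell
 * units; the << 1 converts the max assignment to 32-bit units.
 */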
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}

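/*
 * Enter/exit the stable power state used for UMD profiling: park the
 * RLC in safe mode and disable perfmon MGCG (and ASPM on dGPUs) so
 * clocks stay deterministic while profiling.
 */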
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_VANGOGH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_BEIGE_GOBY:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	case CHIP_YELLOW_CARP:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->pdev->device == 0x1681)
			adev->external_rev_id = adev->rev_id + 0x19;
		else
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
		adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
				    AMD_PG_SUPPORT_VCN_DPG |
				    AMD_PG_SUPPORT_JPEG);

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		xgpu_nv_mailbox_get_irq(adev);
		amdgpu_virt_update_sriov_video_codec(adev,
				sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
				sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
	}

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

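/*
 * Common HW init: apply NBIO workarounds, program the PCIe link and
 * ASPM, initialize NBIO registers, remap HDP into the MMIO hole and
 * open the doorbell apertures.
 */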
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->nbio.funcs->apply_lc_spc_mode_wa)
		adev->nbio.funcs->apply_lc_spc_mode_wa(adev);

	if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
		adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * so that they can be exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	adev->smuio.funcs->get_clock_gating_state(adev, flags);
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27
28#include "amdgpu.h"
29#include "amdgpu_atombios.h"
30#include "amdgpu_ih.h"
31#include "amdgpu_uvd.h"
32#include "amdgpu_vce.h"
33#include "amdgpu_ucode.h"
34#include "amdgpu_psp.h"
35#include "amdgpu_smu.h"
36#include "atom.h"
37#include "amd_pcie.h"
38
39#include "gc/gc_10_1_0_offset.h"
40#include "gc/gc_10_1_0_sh_mask.h"
41#include "hdp/hdp_5_0_0_offset.h"
42#include "hdp/hdp_5_0_0_sh_mask.h"
43
44#include "soc15.h"
45#include "soc15_common.h"
46#include "gmc_v10_0.h"
47#include "gfxhub_v2_0.h"
48#include "mmhub_v2_0.h"
49#include "nv.h"
50#include "navi10_ih.h"
51#include "gfx_v10_0.h"
52#include "sdma_v5_0.h"
53#include "vcn_v2_0.h"
54#include "dce_virtual.h"
55#include "mes_v10_1.h"
56
57static const struct amd_ip_funcs nv_common_ip_funcs;
58
59/*
60 * Indirect registers accessor
61 */
62static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
63{
64 unsigned long flags, address, data;
65 u32 r;
66 address = adev->nbio_funcs->get_pcie_index_offset(adev);
67 data = adev->nbio_funcs->get_pcie_data_offset(adev);
68
69 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
70 WREG32(address, reg);
71 (void)RREG32(address);
72 r = RREG32(data);
73 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
74 return r;
75}
76
77static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
78{
79 unsigned long flags, address, data;
80
81 address = adev->nbio_funcs->get_pcie_index_offset(adev);
82 data = adev->nbio_funcs->get_pcie_data_offset(adev);
83
84 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
85 WREG32(address, reg);
86 (void)RREG32(address);
87 WREG32(data, v);
88 (void)RREG32(data);
89 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
90}
91
92static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
93{
94 unsigned long flags, address, data;
95 u32 r;
96
97 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
98 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
99
100 spin_lock_irqsave(&adev->didt_idx_lock, flags);
101 WREG32(address, (reg));
102 r = RREG32(data);
103 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
104 return r;
105}
106
107static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
108{
109 unsigned long flags, address, data;
110
111 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
112 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
113
114 spin_lock_irqsave(&adev->didt_idx_lock, flags);
115 WREG32(address, (reg));
116 WREG32(data, (v));
117 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
118}
119
120static u32 nv_get_config_memsize(struct amdgpu_device *adev)
121{
122 return adev->nbio_funcs->get_memsize(adev);
123}
124
125static u32 nv_get_xclk(struct amdgpu_device *adev)
126{
127 return adev->clock.spll.reference_freq;
128}
129
130
131void nv_grbm_select(struct amdgpu_device *adev,
132 u32 me, u32 pipe, u32 queue, u32 vmid)
133{
134 u32 grbm_gfx_cntl = 0;
135 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
136 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
137 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
138 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
139
140 WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
141}
142
143static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
144{
145 /* todo */
146}
147
148static bool nv_read_disabled_bios(struct amdgpu_device *adev)
149{
150 /* todo */
151 return false;
152}
153
154static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
155 u8 *bios, u32 length_bytes)
156{
157 /* TODO: will implement it when SMU header is available */
158 return false;
159}
160
161static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
162 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
163 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
164 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
165 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
166 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
167 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
168#if 0 /* TODO: will set it when SDMA header is available */
169 { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
170 { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
171#endif
172 { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
173 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
174 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
175 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
176 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
177 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
178 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
179 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
180 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
181 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
182};
183
184static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
185 u32 sh_num, u32 reg_offset)
186{
187 uint32_t val;
188
189 mutex_lock(&adev->grbm_idx_mutex);
190 if (se_num != 0xffffffff || sh_num != 0xffffffff)
191 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
192
193 val = RREG32(reg_offset);
194
195 if (se_num != 0xffffffff || sh_num != 0xffffffff)
196 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
197 mutex_unlock(&adev->grbm_idx_mutex);
198 return val;
199}
200
201static uint32_t nv_get_register_value(struct amdgpu_device *adev,
202 bool indexed, u32 se_num,
203 u32 sh_num, u32 reg_offset)
204{
205 if (indexed) {
206 return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
207 } else {
208 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
209 return adev->gfx.config.gb_addr_config;
210 return RREG32(reg_offset);
211 }
212}
213
214static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
215 u32 sh_num, u32 reg_offset, u32 *value)
216{
217 uint32_t i;
218 struct soc15_allowed_register_entry *en;
219
220 *value = 0;
221 for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
222 en = &nv_allowed_read_registers[i];
223 if (reg_offset !=
224 (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
225 continue;
226
227 *value = nv_get_register_value(adev,
228 nv_allowed_read_registers[i].grbm_indexed,
229 se_num, sh_num, reg_offset);
230 return 0;
231 }
232 return -EINVAL;
233}
234
235#if 0
236static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
237{
238 u32 i;
239
240 dev_info(adev->dev, "GPU pci config reset\n");
241
242 /* disable BM */
243 pci_clear_master(adev->pdev);
244 /* reset */
245 amdgpu_pci_config_reset(adev);
246
247 udelay(100);
248
249 /* wait for asic to come out of reset */
250 for (i = 0; i < adev->usec_timeout; i++) {
251 u32 memsize = nbio_v2_3_get_memsize(adev);
252 if (memsize != 0xffffffff)
253 break;
254 udelay(1);
255 }
256
257}
258#endif
259
260static int nv_asic_mode1_reset(struct amdgpu_device *adev)
261{
262 u32 i;
263 int ret = 0;
264
265 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
266
267 dev_info(adev->dev, "GPU mode1 reset\n");
268
269 /* disable BM */
270 pci_clear_master(adev->pdev);
271
272 pci_save_state(adev->pdev);
273
274 ret = psp_gpu_reset(adev);
275 if (ret)
276 dev_err(adev->dev, "GPU mode1 reset failed\n");
277
278 pci_restore_state(adev->pdev);
279
280 /* wait for asic to come out of reset */
281 for (i = 0; i < adev->usec_timeout; i++) {
282 u32 memsize = adev->nbio_funcs->get_memsize(adev);
283
284 if (memsize != 0xffffffff)
285 break;
286 udelay(1);
287 }
288
289 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
290
291 return ret;
292}
293
294static enum amd_reset_method
295nv_asic_reset_method(struct amdgpu_device *adev)
296{
297 struct smu_context *smu = &adev->smu;
298
299 if (smu_baco_is_support(smu))
300 return AMD_RESET_METHOD_BACO;
301 else
302 return AMD_RESET_METHOD_MODE1;
303}
304
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* FIXME: this hasn't worked since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (!adev->in_suspend)
		amdgpu_inc_vram_lost(adev);

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		ret = smu_baco_reset(smu);
	else
		ret = nv_asic_mode1_reset(adev);

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

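/*
 * Register bases are preferably taken from the IP discovery table
 * exposed by the firmware; if that fails (or amdgpu_discovery is
 * disabled) we fall back to the hardcoded per-ASIC init functions.
 */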
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

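/*
 * IP blocks are added in bring-up order: common and GMC first, then the
 * interrupt handler and PSP.  The SMU slots in either before or after
 * the gfx/sdma blocks depending on whether firmware is loaded through
 * the PSP or directly, and the display block is the virtual one under
 * SR-IOV or when virtual display is forced.
 */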
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

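/*
 * When a ring that can emit register writes is supplied, the HDP read
 * cache invalidate is queued on the ring so it is ordered with the
 * surrounding commands; otherwise the register is written directly via
 * MMIO.
 */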
static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable this when mode1 reset is functional */
	return false;
}

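/*
 * Doorbell indices carve the doorbell BAR up between the KIQ, compute
 * (MEC) rings, user queues, gfx rings, SDMA engines, IH and VCN.
 * max_assignment is shifted left by one presumably because NV doorbells
 * are 64-bit, i.e. each index occupies two 32-bit slots.
 */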
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};

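/*
 * early_init wires up the indirect register accessors, installs the
 * asic function table and fills in the per-ASIC clock/power gating
 * feature masks.  external_rev_id maps the on-die rev_id into the
 * family-wide revision space reported to userspace (e.g. Navi14 starts
 * at 20).
 */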
static int nv_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	return 0;
}

static int nv_common_sw_init(void *handle)
{
	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

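/*
 * HDP 5.0 memory power gating sequence: force the IPH and RC memory
 * clocks on, disable every power mode, then enable exactly one of
 * LS/DS/SD (light sleep, deep sleep, shutdown) before releasing the
 * clock override.  Only one mode may be active at a time.
 */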
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH and RC
	 * memory clocks on.
	 */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching; disable
	 * clock and power gating before making any change.
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one power mode (LS/DS/SD) can be enabled at a time */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC must not use shutdown mode; fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore the IPH and RC clock override after the mode change */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

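/*
 * HDP medium grain clock gating: the *_CLK_SOFT_OVERRIDE bits force the
 * respective clocks on, so enabling MGCG means clearing all of the
 * overrides and disabling it means setting them.
 */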
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

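/*
 * Under SR-IOV the host owns clock gating programming, so VFs skip it
 * entirely.
 */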
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

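/*
 * Rather than caching software state, the current gating configuration
 * is reconstructed by reading the registers back: all soft overrides
 * clear implies MGCG is active, and whichever IPH power-mode enable bit
 * is set distinguishes LS vs DS vs SD.
 */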
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};