/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
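/*
 * These registers sit behind an index/data pair: the register offset
 * is written to the index register and the value is then accessed
 * through the data register.  The dummy read back after each write
 * flushes the posted write, and the spinlock keeps the sequence atomic.
 */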
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u64 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* read low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);

	/* read high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	r |= ((u64)RREG32(data) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* write low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, (u32)(v & 0xffffffffULL));
	(void)RREG32(data);

	/* write high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	WREG32(data, (u32)(v >> 32));
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}


void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

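/*
 * Stream the discrete-GPU VBIOS out of the ROM mirror in SMUIO:
 * ROM_INDEX is reset to zero once, then each read of ROM_DATA
 * returns the next dword of the image.
 */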
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

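/* Status registers that user space may read back through nv_read_register() */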
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

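/*
 * Mode1 reset flow: disable bus mastering and save the PCI config
 * space, trigger the reset through either the SMU or the PSP, then
 * restore the config space and poll the memsize register until it no
 * longer reads 0xffffffff, i.e. until the ASIC is back.
 */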
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		return AMD_RESET_METHOD_MODE1;
	default:
		if (smu_baco_is_support(smu))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

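/*
 * BACO ("bus active, chip off") reset is a round trip: entering BACO
 * powers the chip down while the bus stays alive, and exiting brings
 * it back freshly initialized.  Anything else falls through to a
 * mode1 reset.
 */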
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "GPU BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

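/*
 * Register bases normally come from the IP discovery table; if that
 * fails, or discovery is disabled via the amdgpu_discovery module
 * parameter, fall back to the hardcoded per-ASIC tables.
 */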
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

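/*
 * IP blocks are added in the order they will be initialized.  Note
 * the SMU placement: with PSP-loaded firmware the SMU block follows
 * the PSP block, while with direct firmware loading it is added
 * after SDMA instead.
 */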
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* The guest VM gets 0xffffffff when reading
		 * RCC_DEV0_EPF0_STRAP0; as a consequence, the rev_id and
		 * external_rev_id are wrong.  Work around this by
		 * hardcoding the rev_id to 0 (the default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in MMIO space, so that they
	 * can be exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

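/*
 * HDP 5.0 SRAM power gating sequence: force the IPH and RC memory
 * clocks on, disable every gating mode, program exactly one of
 * LS/DS/SD, then re-enable the power controllers and restore the
 * original clock overrides.  The detour is needed because the block
 * cannot switch power modes dynamically.
 */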
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC
	 * clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support a dynamic power mode switch;
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut-down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
	 * be set for SRAM LS/DS/SD */
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
			      AMD_CG_SUPPORT_HDP_SD)) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_CTRL_EN, 1);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

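/*
 * Common-block clockgating covers NBIO medium-grain CG/LS plus the
 * HDP gating handled above.  Under SR-IOV this is a no-op because
 * the hypervisor owns CG/PG control.
 */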
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};

/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};

/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};

/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
	.codec_array = sriov_sc_video_codecs_encode_array,
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
	.codec_array = sriov_sc_video_codecs_decode_array,
};

/* Beige Goby */
static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs bg_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
	.codec_array = bg_video_codecs_decode_array,
};

static const struct amdgpu_video_codecs bg_video_codecs_encode = {
	.codec_count = 0,
	.codec_array = NULL,
};

/* Yellow Carp */
static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs yc_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
	.codec_array = yc_video_codecs_decode_array,
};

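/*
 * Report the encode/decode capabilities of the video engines based on
 * the VCN IP version.  SR-IOV Sienna Cichlid gets its own writable
 * tables since the host controls which codecs the VF may use.
 */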
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 0, 64):
	case IP_VERSION(3, 0, 192):
		if (amdgpu_sriov_vf(adev)) {
			if (encode)
				*codecs = &sriov_sc_video_codecs_encode;
			else
				*codecs = &sriov_sc_video_codecs_decode;
		} else {
			if (encode)
				*codecs = &nv_video_codecs_encode;
			else
				*codecs = &sc_video_codecs_decode;
		}
		return 0;
	case IP_VERSION(3, 0, 16):
	case IP_VERSION(3, 0, 2):
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &sc_video_codecs_decode;
		return 0;
	case IP_VERSION(3, 1, 1):
	case IP_VERSION(3, 1, 2):
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &yc_video_codecs_decode;
		return 0;
	case IP_VERSION(3, 0, 33):
		if (encode)
			*codecs = &bg_video_codecs_encode;
		else
			*codecs = &bg_video_codecs_decode;
		return 0;
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &nv_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}


void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

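/*
 * Mode2 reset: the same disable-bus-mastering / cache-PCI-state dance
 * as mode1, but the reset itself is the lighter SMU mode2 request
 * that nv_asic_reset_method() selects for the APU parts.
 */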
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
		return AMD_RESET_METHOD_MODE2;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		return AMD_RESET_METHOD_MODE1;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

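/*
 * Dispatch to whichever reset backend nv_asic_reset_method() picked:
 * a PCI-level reset, BACO through the SMU, the per-IP mode2 reset,
 * or the default whole-ASIC mode1 reset.
 */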
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}

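/*
 * Called when a UMD requests a stable power state for profiling:
 * entering parks the RLC in safe mode and turns perfmon MGCG and ASPM
 * off so the measurements are not perturbed; leaving restores both.
 */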
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm) &&
	    amdgpu_device_should_use_aspm(adev))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};

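/*
 * Early init wires up the indirect register accessors and the per-ASIC
 * CG/PG feature masks. MMIO_REG_HOLE_OFFSET below is the last page under
 * the 512 KB mark of the register BAR, used on bare metal to remap the
 * HDP registers (see nv_common_hw_init()).
 */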
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev)) {
		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	}
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	/* TODO: split the CG and PG flags based on the IP version to
	 * which they apply.
	 */
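	/*
	 * IP discovery versions pack one byte per field; as a sketch,
	 * assuming the common amdgpu IP_VERSION() definition,
	 * IP_VERSION(10, 3, 0) is (10 << 16) | (3 << 8) | 0 = 0xa0300.
	 */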
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case IP_VERSION(10, 1, 1):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case IP_VERSION(10, 1, 2):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* The guest VM gets 0xffffffff when reading
		 * RCC_DEV0_EPF0_STRAP0; as a consequence, the rev_id and
		 * external_rev_id are wrong. Work around this by hardcoding
		 * the rev_id to 0 (its default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case IP_VERSION(10, 3, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* The hypervisor controls CG and PG enablement. */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case IP_VERSION(10, 3, 2):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case IP_VERSION(10, 3, 1):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case IP_VERSION(10, 3, 4):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case IP_VERSION(10, 3, 5):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	case IP_VERSION(10, 3, 3):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->pdev->device == 0x1681)
			adev->external_rev_id = 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x82;
		break;
	case IP_VERSION(10, 3, 6):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case IP_VERSION(10, 3, 7):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_GFX_PG;
		adev->external_rev_id = adev->rev_id + 0x01;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
		adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
				    AMD_PG_SUPPORT_VCN_DPG |
				    AMD_PG_SUPPORT_JPEG);

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

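/*
 * Under SR-IOV, late init hooks up the host mailbox interrupt and swaps
 * the advertised video codec caps for the SR-IOV encode/decode arrays.
 */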
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		xgpu_nv_mailbox_get_irq(adev);
		amdgpu_virt_update_sriov_video_codec(adev,
			sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
			sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
	}

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

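/*
 * Bring-up order matters here: NBIO link workarounds first, then PCIe
 * link speed and ASPM, then NBIO register init, and finally the HDP
 * remap and the doorbell apertures.
 */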
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->nbio.funcs->apply_lc_spc_mode_wa)
		adev->nbio.funcs->apply_lc_spc_mode_wa(adev);

	if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
		adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);

	/* enable PCIe gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable ASPM */
	nv_program_aspm(adev);
	/* set up NBIO registers */
	adev->nbio.funcs->init_registers(adev);
	/* Remap HDP registers to a hole in MMIO space in order to
	 * expose them to process space.
	 */
	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

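/*
 * Suspend/resume simply reuse the hw_fini/hw_init paths. Note that adev
 * is passed where a handle is expected; this works because the handle
 * for this IP block is the amdgpu_device pointer itself.
 */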
static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

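/*
 * Common-block clockgating fans out to the NBIO, HDP and SMUIO helpers;
 * the cases below cover the NBIO v2.3.x and v3.3.x instances handled by
 * this file. SR-IOV guests return early since the host owns gating.
 */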
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	adev->smuio.funcs->get_clock_gating_state(adev, flags);
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};