v6.9.4
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include <drm/amdgpu_drm.h>
  29
  30#include "amdgpu.h"
  31#include "amdgpu_atombios.h"
  32#include "amdgpu_ih.h"
  33#include "amdgpu_uvd.h"
  34#include "amdgpu_vce.h"
  35#include "amdgpu_ucode.h"
  36#include "amdgpu_psp.h"
  37#include "atom.h"
  38#include "amd_pcie.h"
  39
  40#include "gc/gc_10_1_0_offset.h"
  41#include "gc/gc_10_1_0_sh_mask.h"
  42#include "mp/mp_11_0_offset.h"
  43
  44#include "soc15.h"
  45#include "soc15_common.h"
  46#include "gmc_v10_0.h"
  47#include "gfxhub_v2_0.h"
  48#include "mmhub_v2_0.h"
  49#include "nbio_v2_3.h"
  50#include "nbio_v7_2.h"
  51#include "hdp_v5_0.h"
  52#include "nv.h"
  53#include "navi10_ih.h"
  54#include "gfx_v10_0.h"
  55#include "sdma_v5_0.h"
  56#include "sdma_v5_2.h"
  57#include "vcn_v2_0.h"
  58#include "jpeg_v2_0.h"
  59#include "vcn_v3_0.h"
  60#include "jpeg_v3_0.h"
  61#include "amdgpu_vkms.h"
  62#include "mes_v10_1.h"
  63#include "mxgpu_nv.h"
  64#include "smuio_v11_0.h"
  65#include "smuio_v11_0_6.h"
  66
  67static const struct amd_ip_funcs nv_common_ip_funcs;
  68
  69/* Navi */
  70static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
  71	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
  72	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
  73};
  74
  75static const struct amdgpu_video_codecs nv_video_codecs_encode = {
  76	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
  77	.codec_array = nv_video_codecs_encode_array,
  78};
  79
  80/* Navi1x */
  81static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
  82	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
  83	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
  84	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
  85	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
  86	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
  87	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
  88	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
  89};
  90
  91static const struct amdgpu_video_codecs nv_video_codecs_decode = {
  92	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
  93	.codec_array = nv_video_codecs_decode_array,
  94};
  95
  96/* Sienna Cichlid */
  97static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
  98	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
  99	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
 100};
 101
 102static const struct amdgpu_video_codecs sc_video_codecs_encode = {
 103	.codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
 104	.codec_array = sc_video_codecs_encode_array,
 105};
 106
 107static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
 108	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
 109	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
 110	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
 111	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
 112	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 113	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 114	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 115	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 116};
 117
 118static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
 119	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
 120	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
 121	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
 122	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
 123	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 124	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 125	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 126};
 127
 128static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 = {
 129	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0),
 130	.codec_array = sc_video_codecs_decode_array_vcn0,
 131};
 132
 133static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
 134	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1),
 135	.codec_array = sc_video_codecs_decode_array_vcn1,
 136};
 137
 138/* SRIOV Sienna Cichlid, not const since data is controlled by host */
 139static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
 140	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
 141	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
 142};
 143
 144static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
 145	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
 146	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
 147	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
 148	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
 149	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 150	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 151	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 152	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 153};
 154
 155static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
 156	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
 157	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
 158	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
 159	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
 160	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 161	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 162	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 163};
 164
 165static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = {
 166	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
 167	.codec_array = sriov_sc_video_codecs_encode_array,
 168};
 169
 170static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 = {
 171	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0),
 172	.codec_array = sriov_sc_video_codecs_decode_array_vcn0,
 173};
 174
 175static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 = {
 176	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1),
 177	.codec_array = sriov_sc_video_codecs_decode_array_vcn1,
 178};
 179
 180/* Beige Goby*/
 181static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
 182	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
 183	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 184	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 185};
 186
 187static const struct amdgpu_video_codecs bg_video_codecs_decode = {
 188	.codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
 189	.codec_array = bg_video_codecs_decode_array,
 190};
 191
 192static const struct amdgpu_video_codecs bg_video_codecs_encode = {
 193	.codec_count = 0,
 194	.codec_array = NULL,
 195};
 196
 197/* Yellow Carp*/
 198static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
 199	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
 200	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 201	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 202	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 203	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 204};
 205
 206static const struct amdgpu_video_codecs yc_video_codecs_decode = {
 207	.codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
 208	.codec_array = yc_video_codecs_decode_array,
  209};
 210
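/*
 * Return the encode or decode codec capability table that matches the VCN
 * IP version on this chip.  SR-IOV guests get the host-controlled tables,
 * and chips with VCN0 harvested fall back to the VCN1 decode table (no AV1).
 * Fails with -EINVAL when every VCN instance is harvested or the IP version
 * is not recognized.
 */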
 211static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
 212				 const struct amdgpu_video_codecs **codecs)
 213{
 214	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
 215		return -EINVAL;
 216
 217	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
 218	case IP_VERSION(3, 0, 0):
 219	case IP_VERSION(3, 0, 64):
 220	case IP_VERSION(3, 0, 192):
 221		if (amdgpu_sriov_vf(adev)) {
 222			if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
 223				if (encode)
 224					*codecs = &sriov_sc_video_codecs_encode;
 225				else
 226					*codecs = &sriov_sc_video_codecs_decode_vcn1;
 227			} else {
 228				if (encode)
 229					*codecs = &sriov_sc_video_codecs_encode;
 230				else
 231					*codecs = &sriov_sc_video_codecs_decode_vcn0;
 232			}
 233		} else {
 234			if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
 235				if (encode)
 236					*codecs = &sc_video_codecs_encode;
 237				else
 238					*codecs = &sc_video_codecs_decode_vcn1;
 239			} else {
 240				if (encode)
 241					*codecs = &sc_video_codecs_encode;
 242				else
 243					*codecs = &sc_video_codecs_decode_vcn0;
 244			}
 245		}
 246		return 0;
 247	case IP_VERSION(3, 0, 16):
 248	case IP_VERSION(3, 0, 2):
 249		if (encode)
 250			*codecs = &sc_video_codecs_encode;
 251		else
 252			*codecs = &sc_video_codecs_decode_vcn0;
 253		return 0;
 254	case IP_VERSION(3, 1, 1):
 255	case IP_VERSION(3, 1, 2):
 256		if (encode)
 257			*codecs = &sc_video_codecs_encode;
 258		else
 259			*codecs = &yc_video_codecs_decode;
 260		return 0;
 261	case IP_VERSION(3, 0, 33):
 262		if (encode)
 263			*codecs = &bg_video_codecs_encode;
 264		else
 265			*codecs = &bg_video_codecs_decode;
 266		return 0;
 267	case IP_VERSION(2, 0, 0):
 268	case IP_VERSION(2, 0, 2):
 269		if (encode)
 270			*codecs = &nv_video_codecs_encode;
 271		else
 272			*codecs = &nv_video_codecs_decode;
 273		return 0;
 274	default:
 275		return -EINVAL;
 276	}
 277}
 278
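/*
 * DIDT registers are reached indirectly through the mmDIDT_IND_INDEX /
 * mmDIDT_IND_DATA pair; didt_idx_lock serializes the index/data sequence.
 */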
 279static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
 280{
 281	unsigned long flags, address, data;
 282	u32 r;
 283
 284	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 285	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 286
 287	spin_lock_irqsave(&adev->didt_idx_lock, flags);
 288	WREG32(address, (reg));
 289	r = RREG32(data);
 290	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 291	return r;
 292}
 293
 294static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 295{
 296	unsigned long flags, address, data;
 297
 298	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 299	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 300
 301	spin_lock_irqsave(&adev->didt_idx_lock, flags);
 302	WREG32(address, (reg));
 303	WREG32(data, (v));
 304	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 305}
 306
 307static u32 nv_get_config_memsize(struct amdgpu_device *adev)
 308{
 309	return adev->nbio.funcs->get_memsize(adev);
 310}
 311
 312static u32 nv_get_xclk(struct amdgpu_device *adev)
 313{
 314	return adev->clock.spll.reference_freq;
 315}
 316
 317
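/*
 * Program GRBM_GFX_CNTL so that subsequent GRBM-indexed register accesses
 * target the given micro engine (ME), pipe, queue and VMID.
 */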
 318void nv_grbm_select(struct amdgpu_device *adev,
 319		     u32 me, u32 pipe, u32 queue, u32 vmid)
 320{
 321	u32 grbm_gfx_cntl = 0;
 322	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
 323	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
 324	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
 325	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
 326
 327	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
 328}
 329
 330static bool nv_read_disabled_bios(struct amdgpu_device *adev)
 331{
 332	/* todo */
 333	return false;
 334}
 335
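/*
 * Allow-list of registers that userspace may read back through
 * nv_read_register(); offsets outside this table are rejected with -EINVAL.
 */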
 336static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
 337	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
 338	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
 339	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
 340	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
 341	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
 342	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
 343	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
 344	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
 345	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
 346	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
 347	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
 348	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
 349	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
 350	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
 351	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
 352	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
 353	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
 354	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
 355	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
 356};
 357
 358static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 359					 u32 sh_num, u32 reg_offset)
 360{
 361	uint32_t val;
 362
 363	mutex_lock(&adev->grbm_idx_mutex);
 364	if (se_num != 0xffffffff || sh_num != 0xffffffff)
 365		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
 366
 367	val = RREG32(reg_offset);
 368
 369	if (se_num != 0xffffffff || sh_num != 0xffffffff)
 370		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 371	mutex_unlock(&adev->grbm_idx_mutex);
 372	return val;
 373}
 374
 375static uint32_t nv_get_register_value(struct amdgpu_device *adev,
 376				      bool indexed, u32 se_num,
 377				      u32 sh_num, u32 reg_offset)
 378{
 379	if (indexed) {
 380		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
 381	} else {
 382		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
 383			return adev->gfx.config.gb_addr_config;
 384		return RREG32(reg_offset);
 385	}
 386}
 387
 388static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
 389			    u32 sh_num, u32 reg_offset, u32 *value)
 390{
 391	uint32_t i;
 392	struct soc15_allowed_register_entry  *en;
 393
 394	*value = 0;
 395	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
 396		en = &nv_allowed_read_registers[i];
 397		if (!adev->reg_offset[en->hwip][en->inst])
 398			continue;
 399		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
 400					+ en->reg_offset))
 401			continue;
 402
 403		*value = nv_get_register_value(adev,
 404					       nv_allowed_read_registers[i].grbm_indexed,
 405					       se_num, sh_num, reg_offset);
 406		return 0;
 407	}
 408	return -EINVAL;
 409}
 410
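/*
 * Mode2 (SMU-assisted) reset: flag the engine as hung in the ATOM scratch
 * registers, stop bus mastering, cache the PCI config space, ask the SMU to
 * perform the reset, then restore config space and poll the NBIO memsize
 * register until the ASIC responds again.
 */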
 411static int nv_asic_mode2_reset(struct amdgpu_device *adev)
 412{
 413	u32 i;
 414	int ret = 0;
 415
 416	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 417
 418	/* disable BM */
 419	pci_clear_master(adev->pdev);
 420
 421	amdgpu_device_cache_pci_state(adev->pdev);
 422
 423	ret = amdgpu_dpm_mode2_reset(adev);
 424	if (ret)
 425		dev_err(adev->dev, "GPU mode2 reset failed\n");
 426
 427	amdgpu_device_load_pci_state(adev->pdev);
 428
 429	/* wait for asic to come out of reset */
 430	for (i = 0; i < adev->usec_timeout; i++) {
 431		u32 memsize = adev->nbio.funcs->get_memsize(adev);
 432
 433		if (memsize != 0xffffffff)
 434			break;
 435		udelay(1);
 436	}
 437
 438	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 439
 440	return ret;
 441}
 442
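/*
 * Pick the reset method: honour a supported amdgpu_reset_method module
 * parameter when one is given, otherwise derive the default from the MP1
 * (SMU) IP version, falling back to BACO when it is supported and to mode1
 * when it is not.
 */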
 443static enum amd_reset_method
 444nv_asic_reset_method(struct amdgpu_device *adev)
 445{
 446	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
 447	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
 448	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
 449	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
 450		return amdgpu_reset_method;
 451
 452	if (amdgpu_reset_method != -1)
 453		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
 454				  amdgpu_reset_method);
 455
 456	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 457	case IP_VERSION(11, 5, 0):
 458	case IP_VERSION(13, 0, 1):
 459	case IP_VERSION(13, 0, 3):
 460	case IP_VERSION(13, 0, 5):
 461	case IP_VERSION(13, 0, 8):
 462		return AMD_RESET_METHOD_MODE2;
 463	case IP_VERSION(11, 0, 7):
 464	case IP_VERSION(11, 0, 11):
 465	case IP_VERSION(11, 0, 12):
 466	case IP_VERSION(11, 0, 13):
 467		return AMD_RESET_METHOD_MODE1;
 468	default:
 469		if (amdgpu_dpm_is_baco_supported(adev))
 470			return AMD_RESET_METHOD_BACO;
 471		else
 472			return AMD_RESET_METHOD_MODE1;
 473	}
 474}
 475
 476static int nv_asic_reset(struct amdgpu_device *adev)
 477{
 478	int ret = 0;
 479
 480	switch (nv_asic_reset_method(adev)) {
 481	case AMD_RESET_METHOD_PCI:
 482		dev_info(adev->dev, "PCI reset\n");
 483		ret = amdgpu_device_pci_reset(adev);
 484		break;
 485	case AMD_RESET_METHOD_BACO:
 486		dev_info(adev->dev, "BACO reset\n");
 487		ret = amdgpu_dpm_baco_reset(adev);
 488		break;
 489	case AMD_RESET_METHOD_MODE2:
 490		dev_info(adev->dev, "MODE2 reset\n");
 491		ret = nv_asic_mode2_reset(adev);
 492		break;
 493	default:
 494		dev_info(adev->dev, "MODE1 reset\n");
 495		ret = amdgpu_device_mode1_reset(adev);
 496		break;
 497	}
 498
 499	return ret;
 500}
 501
 502static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 503{
 504	/* todo */
 505	return 0;
 506}
 507
 508static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 509{
 510	/* todo */
 511	return 0;
 512}
 513
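/*
 * ASPM programming is delegated to the NBIO block; skip it entirely when the
 * core driver decides ASPM should not be used on this configuration.
 */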
 514static void nv_program_aspm(struct amdgpu_device *adev)
 515{
 516	if (!amdgpu_device_should_use_aspm(adev))
 517		return;
 518
 519	if (adev->nbio.funcs->program_aspm)
 520		adev->nbio.funcs->program_aspm(adev);
 521
 522}
 523
 524const struct amdgpu_ip_block_version nv_common_ip_block = {
 525	.type = AMD_IP_BLOCK_TYPE_COMMON,
 526	.major = 1,
 527	.minor = 0,
 528	.rev = 0,
 529	.funcs = &nv_common_ip_funcs,
 530};
 531
 532void nv_set_virt_ops(struct amdgpu_device *adev)
 533{
 534	adev->virt.ops = &xgpu_nv_virt_ops;
 535}
 536
 537static bool nv_need_full_reset(struct amdgpu_device *adev)
 538{
 539	return true;
 540}
 541
 542static bool nv_need_reset_on_init(struct amdgpu_device *adev)
 543{
 544	u32 sol_reg;
 545
 546	if (adev->flags & AMD_IS_APU)
 547		return false;
 548
 549	/* Check sOS sign of life register to confirm sys driver and sOS
  550	 * have already been loaded.
 551	 */
 552	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
 553	if (sol_reg)
 554		return true;
 555
 556	return false;
 557}
 558
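/*
 * Lay out the Navi doorbell index space: KIQ and MEC compute rings, gfx and
 * MES rings, user queues, the SDMA engines, the IH ring and the VCN rings,
 * plus the overall assignment limit used when doorbells are allocated.
 */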
 559static void nv_init_doorbell_index(struct amdgpu_device *adev)
 560{
 561	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
 562	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
 563	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
 564	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
 565	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
 566	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
 567	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
 568	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
 569	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
 570	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
 571	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
 572	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
 573	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
 574	adev->doorbell_index.gfx_userqueue_start =
 575		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
 576	adev->doorbell_index.gfx_userqueue_end =
 577		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
 578	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
 579	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
 580	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
 581	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
 582	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
 583	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
 584	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
 585	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
 586	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
 587	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
 588	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
 589	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
 590	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;
 591
 592	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
 593	adev->doorbell_index.sdma_doorbell_range = 20;
 594}
 595
 596static void nv_pre_asic_init(struct amdgpu_device *adev)
 597{
 598}
 599
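/*
 * Back end for the update_umd_stable_pstate ASIC callback: when a user-mode
 * driver asks for a stable power state, enter RLC safe mode and turn off
 * perfmon clock gating and ASPM; undo all of that when the request is
 * released.
 */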
 600static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
 601				       bool enter)
 602{
 603	if (enter)
 604		amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
 605	else
 606		amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
 607
 608	if (adev->gfx.funcs->update_perfmon_mgcg)
 609		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
 610
 611	if (adev->nbio.funcs->enable_aspm &&
 612	    amdgpu_device_should_use_aspm(adev))
 613		adev->nbio.funcs->enable_aspm(adev, !enter);
 614
 615	return 0;
 616}
 617
 618static const struct amdgpu_asic_funcs nv_asic_funcs = {
 619	.read_disabled_bios = &nv_read_disabled_bios,
 620	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
 621	.read_register = &nv_read_register,
 622	.reset = &nv_asic_reset,
 623	.reset_method = &nv_asic_reset_method,
 624	.get_xclk = &nv_get_xclk,
 625	.set_uvd_clocks = &nv_set_uvd_clocks,
 626	.set_vce_clocks = &nv_set_vce_clocks,
 627	.get_config_memsize = &nv_get_config_memsize,
 628	.init_doorbell_index = &nv_init_doorbell_index,
 629	.need_full_reset = &nv_need_full_reset,
 630	.need_reset_on_init = &nv_need_reset_on_init,
 631	.get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
 632	.supports_baco = &amdgpu_dpm_is_baco_supported,
 633	.pre_asic_init = &nv_pre_asic_init,
 634	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
 635	.query_video_codecs = &nv_query_video_codecs,
 636};
 637
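/*
 * Early init for the common IP block: set up the MMIO remap window (bare
 * metal only) and the indirect register accessors, install the ASIC
 * callbacks, then derive the clock/power gating feature flags and the
 * external revision id from the GC IP version.
 */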
 638static int nv_common_early_init(void *handle)
 639{
 640#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
 641	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 642
 643	if (!amdgpu_sriov_vf(adev)) {
 644		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
 645		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
 646	}
 647	adev->smc_rreg = NULL;
 648	adev->smc_wreg = NULL;
 649	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
 650	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
 651	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
 652	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
 653	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
 654	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
 655
 656	/* TODO: will add them during VCN v2 implementation */
 657	adev->uvd_ctx_rreg = NULL;
 658	adev->uvd_ctx_wreg = NULL;
 659
 660	adev->didt_rreg = &nv_didt_rreg;
 661	adev->didt_wreg = &nv_didt_wreg;
 662
 663	adev->asic_funcs = &nv_asic_funcs;
 664
 665	adev->rev_id = amdgpu_device_get_rev_id(adev);
 666	adev->external_rev_id = 0xff;
 667	/* TODO: split the GC and PG flags based on the relevant IP version for which
 668	 * they are relevant.
 669	 */
 670	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 671	case IP_VERSION(10, 1, 10):
 672		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 673			AMD_CG_SUPPORT_GFX_CGCG |
 674			AMD_CG_SUPPORT_IH_CG |
 675			AMD_CG_SUPPORT_HDP_MGCG |
 676			AMD_CG_SUPPORT_HDP_LS |
 677			AMD_CG_SUPPORT_SDMA_MGCG |
 678			AMD_CG_SUPPORT_SDMA_LS |
 679			AMD_CG_SUPPORT_MC_MGCG |
 680			AMD_CG_SUPPORT_MC_LS |
 681			AMD_CG_SUPPORT_ATHUB_MGCG |
 682			AMD_CG_SUPPORT_ATHUB_LS |
 683			AMD_CG_SUPPORT_VCN_MGCG |
 684			AMD_CG_SUPPORT_JPEG_MGCG |
 685			AMD_CG_SUPPORT_BIF_MGCG |
 686			AMD_CG_SUPPORT_BIF_LS;
 687		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 688			AMD_PG_SUPPORT_VCN_DPG |
 689			AMD_PG_SUPPORT_JPEG |
 690			AMD_PG_SUPPORT_ATHUB;
 691		adev->external_rev_id = adev->rev_id + 0x1;
 692		break;
 693	case IP_VERSION(10, 1, 1):
 694		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 695			AMD_CG_SUPPORT_GFX_CGCG |
 696			AMD_CG_SUPPORT_IH_CG |
 697			AMD_CG_SUPPORT_HDP_MGCG |
 698			AMD_CG_SUPPORT_HDP_LS |
 699			AMD_CG_SUPPORT_SDMA_MGCG |
 700			AMD_CG_SUPPORT_SDMA_LS |
 701			AMD_CG_SUPPORT_MC_MGCG |
 702			AMD_CG_SUPPORT_MC_LS |
 703			AMD_CG_SUPPORT_ATHUB_MGCG |
 704			AMD_CG_SUPPORT_ATHUB_LS |
 705			AMD_CG_SUPPORT_VCN_MGCG |
 706			AMD_CG_SUPPORT_JPEG_MGCG |
 707			AMD_CG_SUPPORT_BIF_MGCG |
 708			AMD_CG_SUPPORT_BIF_LS;
 709		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 710			AMD_PG_SUPPORT_JPEG |
 711			AMD_PG_SUPPORT_VCN_DPG;
 712		adev->external_rev_id = adev->rev_id + 20;
 713		break;
 714	case IP_VERSION(10, 1, 2):
 715		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 716			AMD_CG_SUPPORT_GFX_MGLS |
 717			AMD_CG_SUPPORT_GFX_CGCG |
 718			AMD_CG_SUPPORT_GFX_CP_LS |
 719			AMD_CG_SUPPORT_GFX_RLC_LS |
 720			AMD_CG_SUPPORT_IH_CG |
 721			AMD_CG_SUPPORT_HDP_MGCG |
 722			AMD_CG_SUPPORT_HDP_LS |
 723			AMD_CG_SUPPORT_SDMA_MGCG |
 724			AMD_CG_SUPPORT_SDMA_LS |
 725			AMD_CG_SUPPORT_MC_MGCG |
 726			AMD_CG_SUPPORT_MC_LS |
 727			AMD_CG_SUPPORT_ATHUB_MGCG |
 728			AMD_CG_SUPPORT_ATHUB_LS |
 729			AMD_CG_SUPPORT_VCN_MGCG |
 730			AMD_CG_SUPPORT_JPEG_MGCG;
 731		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 732			AMD_PG_SUPPORT_VCN_DPG |
 733			AMD_PG_SUPPORT_JPEG |
 734			AMD_PG_SUPPORT_ATHUB;
 735		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
 736		 * as a consequence, the rev_id and external_rev_id are wrong.
 737		 * workaround it by hardcoding rev_id to 0 (default value).
 738		 */
 739		if (amdgpu_sriov_vf(adev))
 740			adev->rev_id = 0;
 741		adev->external_rev_id = adev->rev_id + 0xa;
 742		break;
 743	case IP_VERSION(10, 3, 0):
 744		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 745			AMD_CG_SUPPORT_GFX_CGCG |
 746			AMD_CG_SUPPORT_GFX_CGLS |
 747			AMD_CG_SUPPORT_GFX_3D_CGCG |
 748			AMD_CG_SUPPORT_MC_MGCG |
 749			AMD_CG_SUPPORT_VCN_MGCG |
 750			AMD_CG_SUPPORT_JPEG_MGCG |
 751			AMD_CG_SUPPORT_HDP_MGCG |
 752			AMD_CG_SUPPORT_HDP_LS |
 753			AMD_CG_SUPPORT_IH_CG |
 754			AMD_CG_SUPPORT_MC_LS;
 755		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 756			AMD_PG_SUPPORT_VCN_DPG |
 757			AMD_PG_SUPPORT_JPEG |
 758			AMD_PG_SUPPORT_ATHUB |
 759			AMD_PG_SUPPORT_MMHUB;
 760		if (amdgpu_sriov_vf(adev)) {
 761			/* hypervisor control CG and PG enablement */
 762			adev->cg_flags = 0;
 763			adev->pg_flags = 0;
 764		}
 765		adev->external_rev_id = adev->rev_id + 0x28;
 766		break;
 767	case IP_VERSION(10, 3, 2):
 768		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 769			AMD_CG_SUPPORT_GFX_CGCG |
 770			AMD_CG_SUPPORT_GFX_CGLS |
 771			AMD_CG_SUPPORT_GFX_3D_CGCG |
 772			AMD_CG_SUPPORT_VCN_MGCG |
 773			AMD_CG_SUPPORT_JPEG_MGCG |
 774			AMD_CG_SUPPORT_MC_MGCG |
 775			AMD_CG_SUPPORT_MC_LS |
 776			AMD_CG_SUPPORT_HDP_MGCG |
 777			AMD_CG_SUPPORT_HDP_LS |
 778			AMD_CG_SUPPORT_IH_CG;
 779		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 780			AMD_PG_SUPPORT_VCN_DPG |
 781			AMD_PG_SUPPORT_JPEG |
 782			AMD_PG_SUPPORT_ATHUB |
 783			AMD_PG_SUPPORT_MMHUB;
 784		adev->external_rev_id = adev->rev_id + 0x32;
 785		break;
 786	case IP_VERSION(10, 3, 1):
 787		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 788			AMD_CG_SUPPORT_GFX_MGLS |
 789			AMD_CG_SUPPORT_GFX_CP_LS |
 790			AMD_CG_SUPPORT_GFX_RLC_LS |
 791			AMD_CG_SUPPORT_GFX_CGCG |
 792			AMD_CG_SUPPORT_GFX_CGLS |
 793			AMD_CG_SUPPORT_GFX_3D_CGCG |
 794			AMD_CG_SUPPORT_GFX_3D_CGLS |
 795			AMD_CG_SUPPORT_MC_MGCG |
 796			AMD_CG_SUPPORT_MC_LS |
 797			AMD_CG_SUPPORT_GFX_FGCG |
 798			AMD_CG_SUPPORT_VCN_MGCG |
 799			AMD_CG_SUPPORT_SDMA_MGCG |
 800			AMD_CG_SUPPORT_SDMA_LS |
 801			AMD_CG_SUPPORT_JPEG_MGCG;
 802		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
 803			AMD_PG_SUPPORT_VCN |
 804			AMD_PG_SUPPORT_VCN_DPG |
 805			AMD_PG_SUPPORT_JPEG;
 806		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
 807			adev->external_rev_id = adev->rev_id + 0x01;
 808		break;
 809	case IP_VERSION(10, 3, 4):
 810		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 811			AMD_CG_SUPPORT_GFX_CGCG |
 812			AMD_CG_SUPPORT_GFX_CGLS |
 813			AMD_CG_SUPPORT_GFX_3D_CGCG |
 814			AMD_CG_SUPPORT_VCN_MGCG |
 815			AMD_CG_SUPPORT_JPEG_MGCG |
 816			AMD_CG_SUPPORT_MC_MGCG |
 817			AMD_CG_SUPPORT_MC_LS |
 818			AMD_CG_SUPPORT_HDP_MGCG |
 819			AMD_CG_SUPPORT_HDP_LS |
 820			AMD_CG_SUPPORT_IH_CG;
 821		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 822			AMD_PG_SUPPORT_VCN_DPG |
 823			AMD_PG_SUPPORT_JPEG |
 824			AMD_PG_SUPPORT_ATHUB |
 825			AMD_PG_SUPPORT_MMHUB;
 826		adev->external_rev_id = adev->rev_id + 0x3c;
 827		break;
 828	case IP_VERSION(10, 3, 5):
 829		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 830			AMD_CG_SUPPORT_GFX_CGCG |
 831			AMD_CG_SUPPORT_GFX_CGLS |
 832			AMD_CG_SUPPORT_GFX_3D_CGCG |
 833			AMD_CG_SUPPORT_MC_MGCG |
 834			AMD_CG_SUPPORT_MC_LS |
 835			AMD_CG_SUPPORT_HDP_MGCG |
 836			AMD_CG_SUPPORT_HDP_LS |
 837			AMD_CG_SUPPORT_IH_CG |
 838			AMD_CG_SUPPORT_VCN_MGCG;
 839		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 840			AMD_PG_SUPPORT_VCN_DPG |
 841			AMD_PG_SUPPORT_ATHUB |
 842			AMD_PG_SUPPORT_MMHUB;
 843		adev->external_rev_id = adev->rev_id + 0x46;
 844		break;
 845	case IP_VERSION(10, 3, 3):
 846		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 847			AMD_CG_SUPPORT_GFX_MGLS |
 848			AMD_CG_SUPPORT_GFX_CGCG |
 849			AMD_CG_SUPPORT_GFX_CGLS |
 850			AMD_CG_SUPPORT_GFX_3D_CGCG |
 851			AMD_CG_SUPPORT_GFX_3D_CGLS |
 852			AMD_CG_SUPPORT_GFX_RLC_LS |
 853			AMD_CG_SUPPORT_GFX_CP_LS |
 854			AMD_CG_SUPPORT_GFX_FGCG |
 855			AMD_CG_SUPPORT_MC_MGCG |
 856			AMD_CG_SUPPORT_MC_LS |
 857			AMD_CG_SUPPORT_SDMA_LS |
 858			AMD_CG_SUPPORT_HDP_MGCG |
 859			AMD_CG_SUPPORT_HDP_LS |
 860			AMD_CG_SUPPORT_ATHUB_MGCG |
 861			AMD_CG_SUPPORT_ATHUB_LS |
 862			AMD_CG_SUPPORT_IH_CG |
 863			AMD_CG_SUPPORT_VCN_MGCG |
 864			AMD_CG_SUPPORT_JPEG_MGCG |
 865			AMD_CG_SUPPORT_SDMA_MGCG;
 866		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
 867			AMD_PG_SUPPORT_VCN |
 868			AMD_PG_SUPPORT_VCN_DPG |
 869			AMD_PG_SUPPORT_JPEG;
 870		if (adev->pdev->device == 0x1681)
 871			adev->external_rev_id = 0x20;
 872		else
 873			adev->external_rev_id = adev->rev_id + 0x01;
 874		break;
 875	case IP_VERSION(10, 1, 3):
 876	case IP_VERSION(10, 1, 4):
 877		adev->cg_flags = 0;
 878		adev->pg_flags = 0;
 879		adev->external_rev_id = adev->rev_id + 0x82;
 880		break;
 881	case IP_VERSION(10, 3, 6):
 882		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 883			AMD_CG_SUPPORT_GFX_MGLS |
 884			AMD_CG_SUPPORT_GFX_CGCG |
 885			AMD_CG_SUPPORT_GFX_CGLS |
 886			AMD_CG_SUPPORT_GFX_3D_CGCG |
 887			AMD_CG_SUPPORT_GFX_3D_CGLS |
 888			AMD_CG_SUPPORT_GFX_RLC_LS |
 889			AMD_CG_SUPPORT_GFX_CP_LS |
 890			AMD_CG_SUPPORT_GFX_FGCG |
 891			AMD_CG_SUPPORT_MC_MGCG |
 892			AMD_CG_SUPPORT_MC_LS |
 893			AMD_CG_SUPPORT_SDMA_LS |
 894			AMD_CG_SUPPORT_HDP_MGCG |
 895			AMD_CG_SUPPORT_HDP_LS |
 896			AMD_CG_SUPPORT_ATHUB_MGCG |
 897			AMD_CG_SUPPORT_ATHUB_LS |
 898			AMD_CG_SUPPORT_IH_CG |
 899			AMD_CG_SUPPORT_VCN_MGCG |
 900			AMD_CG_SUPPORT_JPEG_MGCG;
 901		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
 902			AMD_PG_SUPPORT_VCN |
 903			AMD_PG_SUPPORT_VCN_DPG |
 904			AMD_PG_SUPPORT_JPEG;
 905		adev->external_rev_id = adev->rev_id + 0x01;
 906		break;
 907	case IP_VERSION(10, 3, 7):
 908		adev->cg_flags =  AMD_CG_SUPPORT_GFX_MGCG |
 909			AMD_CG_SUPPORT_GFX_MGLS |
 910			AMD_CG_SUPPORT_GFX_CGCG |
 911			AMD_CG_SUPPORT_GFX_CGLS |
 912			AMD_CG_SUPPORT_GFX_3D_CGCG |
 913			AMD_CG_SUPPORT_GFX_3D_CGLS |
 914			AMD_CG_SUPPORT_GFX_RLC_LS |
 915			AMD_CG_SUPPORT_GFX_CP_LS |
 916			AMD_CG_SUPPORT_GFX_FGCG |
 917			AMD_CG_SUPPORT_MC_MGCG |
 918			AMD_CG_SUPPORT_MC_LS |
 919			AMD_CG_SUPPORT_SDMA_LS |
 920			AMD_CG_SUPPORT_HDP_MGCG |
 921			AMD_CG_SUPPORT_HDP_LS |
 922			AMD_CG_SUPPORT_ATHUB_MGCG |
 923			AMD_CG_SUPPORT_ATHUB_LS |
 924			AMD_CG_SUPPORT_IH_CG |
 925			AMD_CG_SUPPORT_VCN_MGCG |
 926			AMD_CG_SUPPORT_JPEG_MGCG |
 927			AMD_CG_SUPPORT_SDMA_MGCG;
 928		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 929			AMD_PG_SUPPORT_VCN_DPG |
 930			AMD_PG_SUPPORT_JPEG |
 931			AMD_PG_SUPPORT_GFX_PG;
 932		adev->external_rev_id = adev->rev_id + 0x01;
 933		break;
 934	default:
 935		/* FIXME: not supported yet */
 936		return -EINVAL;
 937	}
 938
 939	if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
 940		adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
 941				    AMD_PG_SUPPORT_VCN_DPG |
 942				    AMD_PG_SUPPORT_JPEG);
 943
 944	if (amdgpu_sriov_vf(adev)) {
 945		amdgpu_virt_init_setting(adev);
 946		xgpu_nv_mailbox_set_irq_funcs(adev);
 947	}
 948
 949	return 0;
 950}
 951
 952static int nv_common_late_init(void *handle)
 953{
 954	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 955
 956	if (amdgpu_sriov_vf(adev)) {
 957		xgpu_nv_mailbox_get_irq(adev);
 958		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
 959			amdgpu_virt_update_sriov_video_codec(adev,
 960							     sriov_sc_video_codecs_encode_array,
 961							     ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
 962							     sriov_sc_video_codecs_decode_array_vcn1,
 963							     ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
 964		} else {
 965			amdgpu_virt_update_sriov_video_codec(adev,
 966							     sriov_sc_video_codecs_encode_array,
 967							     ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
 968							     sriov_sc_video_codecs_decode_array_vcn0,
 969							     ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
 970		}
 971	}
 972
 973	/* Enable selfring doorbell aperture late because doorbell BAR
 974	 * aperture will change if resize BAR successfully in gmc sw_init.
 975	 */
 976	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
 977
 978	return 0;
 979}
 980
 981static int nv_common_sw_init(void *handle)
 982{
 983	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 984
 985	if (amdgpu_sriov_vf(adev))
 986		xgpu_nv_mailbox_add_irq_id(adev);
 987
 988	return 0;
 989}
 990
 991static int nv_common_sw_fini(void *handle)
 992{
 993	return 0;
 994}
 995
 996static int nv_common_hw_init(void *handle)
 997{
 998	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 999
1000	if (adev->nbio.funcs->apply_lc_spc_mode_wa)
1001		adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
1002
1003	if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
1004		adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
1005
1006	/* enable aspm */
1007	nv_program_aspm(adev);
1008	/* setup nbio registers */
1009	adev->nbio.funcs->init_registers(adev);
1010	/* remap HDP registers to a hole in mmio space,
 1011	 * in order to expose those registers
1012	 * to process space
1013	 */
1014	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
1015		adev->nbio.funcs->remap_hdp_registers(adev);
1016	/* enable the doorbell aperture */
1017	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
1018
1019	return 0;
1020}
1021
1022static int nv_common_hw_fini(void *handle)
1023{
1024	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1025
1026	/* Disable the doorbell aperture and selfring doorbell aperture
1027	 * separately in hw_fini because nv_enable_doorbell_aperture
1028	 * has been removed and there is no need to delay disabling
1029	 * selfring doorbell.
1030	 */
1031	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
1032	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
1033
1034	return 0;
1035}
1036
1037static int nv_common_suspend(void *handle)
1038{
1039	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1040
1041	return nv_common_hw_fini(adev);
1042}
1043
1044static int nv_common_resume(void *handle)
1045{
1046	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1047
1048	return nv_common_hw_init(adev);
1049}
1050
1051static bool nv_common_is_idle(void *handle)
1052{
1053	return true;
1054}
1055
1056static int nv_common_wait_for_idle(void *handle)
1057{
1058	return 0;
1059}
1060
1061static int nv_common_soft_reset(void *handle)
1062{
1063	return 0;
1064}
1065
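/*
 * Forward clockgating state changes to the NBIO, HDP and SMUIO blocks for
 * the NBIO versions that support it; SR-IOV guests leave gating control to
 * the host.
 */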
1066static int nv_common_set_clockgating_state(void *handle,
1067					   enum amd_clockgating_state state)
1068{
1069	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1070
1071	if (amdgpu_sriov_vf(adev))
1072		return 0;
1073
1074	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
1075	case IP_VERSION(2, 3, 0):
1076	case IP_VERSION(2, 3, 1):
1077	case IP_VERSION(2, 3, 2):
1078	case IP_VERSION(3, 3, 0):
1079	case IP_VERSION(3, 3, 1):
1080	case IP_VERSION(3, 3, 2):
1081	case IP_VERSION(3, 3, 3):
1082		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1083				state == AMD_CG_STATE_GATE);
1084		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1085				state == AMD_CG_STATE_GATE);
1086		adev->hdp.funcs->update_clock_gating(adev,
1087				state == AMD_CG_STATE_GATE);
1088		adev->smuio.funcs->update_rom_clock_gating(adev,
1089				state == AMD_CG_STATE_GATE);
1090		break;
1091	default:
1092		break;
1093	}
1094	return 0;
1095}
1096
1097static int nv_common_set_powergating_state(void *handle,
1098					   enum amd_powergating_state state)
1099{
1100	/* TODO */
1101	return 0;
1102}
1103
1104static void nv_common_get_clockgating_state(void *handle, u64 *flags)
1105{
1106	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1107
1108	if (amdgpu_sriov_vf(adev))
1109		*flags = 0;
1110
1111	adev->nbio.funcs->get_clockgating_state(adev, flags);
1112
1113	adev->hdp.funcs->get_clock_gating_state(adev, flags);
1114
1115	adev->smuio.funcs->get_clock_gating_state(adev, flags);
1116}
1117
1118static const struct amd_ip_funcs nv_common_ip_funcs = {
1119	.name = "nv_common",
1120	.early_init = nv_common_early_init,
1121	.late_init = nv_common_late_init,
1122	.sw_init = nv_common_sw_init,
1123	.sw_fini = nv_common_sw_fini,
1124	.hw_init = nv_common_hw_init,
1125	.hw_fini = nv_common_hw_fini,
1126	.suspend = nv_common_suspend,
1127	.resume = nv_common_resume,
1128	.is_idle = nv_common_is_idle,
1129	.wait_for_idle = nv_common_wait_for_idle,
1130	.soft_reset = nv_common_soft_reset,
1131	.set_clockgating_state = nv_common_set_clockgating_state,
1132	.set_powergating_state = nv_common_set_powergating_state,
1133	.get_clockgating_state = nv_common_get_clockgating_state,
1134};
v5.4
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23#include <linux/firmware.h>
 24#include <linux/slab.h>
 25#include <linux/module.h>
 26#include <linux/pci.h>
 27
 28#include "amdgpu.h"
 29#include "amdgpu_atombios.h"
 30#include "amdgpu_ih.h"
 31#include "amdgpu_uvd.h"
 32#include "amdgpu_vce.h"
 33#include "amdgpu_ucode.h"
 34#include "amdgpu_psp.h"
 35#include "amdgpu_smu.h"
 36#include "atom.h"
 37#include "amd_pcie.h"
 38
 39#include "gc/gc_10_1_0_offset.h"
 40#include "gc/gc_10_1_0_sh_mask.h"
 41#include "hdp/hdp_5_0_0_offset.h"
 42#include "hdp/hdp_5_0_0_sh_mask.h"
 43
 44#include "soc15.h"
 45#include "soc15_common.h"
 46#include "gmc_v10_0.h"
 47#include "gfxhub_v2_0.h"
 48#include "mmhub_v2_0.h"
 49#include "nv.h"
 50#include "navi10_ih.h"
 51#include "gfx_v10_0.h"
 52#include "sdma_v5_0.h"
 53#include "vcn_v2_0.h"
 54#include "dce_virtual.h"
 55#include "mes_v10_1.h"
 56
 57static const struct amd_ip_funcs nv_common_ip_funcs;
 58
 59/*
 60 * Indirect registers accessor
 61 */
 62static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 63{
 64	unsigned long flags, address, data;
 65	u32 r;
 66	address = adev->nbio_funcs->get_pcie_index_offset(adev);
 67	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 68
 69	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 70	WREG32(address, reg);
 71	(void)RREG32(address);
 72	r = RREG32(data);
 73	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 74	return r;
 75}
 76
 77static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 78{
 79	unsigned long flags, address, data;
 80
 81	address = adev->nbio_funcs->get_pcie_index_offset(adev);
 82	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 83
 84	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 85	WREG32(address, reg);
 86	(void)RREG32(address);
 87	WREG32(data, v);
 88	(void)RREG32(data);
 89	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 90}
 91
 92static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
 93{
 94	unsigned long flags, address, data;
 95	u32 r;
 96
 97	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 98	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 99
100	spin_lock_irqsave(&adev->didt_idx_lock, flags);
101	WREG32(address, (reg));
102	r = RREG32(data);
103	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
104	return r;
105}
106
107static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
108{
109	unsigned long flags, address, data;
110
111	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
112	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
113
114	spin_lock_irqsave(&adev->didt_idx_lock, flags);
115	WREG32(address, (reg));
116	WREG32(data, (v));
117	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
118}
119
120static u32 nv_get_config_memsize(struct amdgpu_device *adev)
121{
122	return adev->nbio_funcs->get_memsize(adev);
123}
124
125static u32 nv_get_xclk(struct amdgpu_device *adev)
126{
127	return adev->clock.spll.reference_freq;
128}
129
130
131void nv_grbm_select(struct amdgpu_device *adev,
132		     u32 me, u32 pipe, u32 queue, u32 vmid)
133{
134	u32 grbm_gfx_cntl = 0;
135	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
136	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
137	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
138	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
139
140	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
141}
142
143static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
144{
145	/* todo */
146}
147
148static bool nv_read_disabled_bios(struct amdgpu_device *adev)
149{
150	/* todo */
151	return false;
152}
153
154static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
155				  u8 *bios, u32 length_bytes)
156{
157	/* TODO: will implement it when SMU header is available */
158	return false;
159}
160
161static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
162	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
163	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
164	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
165	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
166	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
167	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
168#if 0	/* TODO: will set it when SDMA header is available */
169	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
170	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
171#endif
172	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
173	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
174	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
175	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
176	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
177	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
178	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
179	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
180	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
181	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
182};
183
184static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
185					 u32 sh_num, u32 reg_offset)
186{
187	uint32_t val;
188
189	mutex_lock(&adev->grbm_idx_mutex);
190	if (se_num != 0xffffffff || sh_num != 0xffffffff)
191		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
192
193	val = RREG32(reg_offset);
194
195	if (se_num != 0xffffffff || sh_num != 0xffffffff)
196		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
197	mutex_unlock(&adev->grbm_idx_mutex);
198	return val;
199}
200
201static uint32_t nv_get_register_value(struct amdgpu_device *adev,
202				      bool indexed, u32 se_num,
203				      u32 sh_num, u32 reg_offset)
204{
205	if (indexed) {
206		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
207	} else {
208		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
209			return adev->gfx.config.gb_addr_config;
210		return RREG32(reg_offset);
211	}
212}
213
214static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
215			    u32 sh_num, u32 reg_offset, u32 *value)
216{
217	uint32_t i;
218	struct soc15_allowed_register_entry  *en;
219
220	*value = 0;
221	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
222		en = &nv_allowed_read_registers[i];
223		if (reg_offset !=
224		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
225			continue;
226
227		*value = nv_get_register_value(adev,
228					       nv_allowed_read_registers[i].grbm_indexed,
229					       se_num, sh_num, reg_offset);
230		return 0;
231	}
232	return -EINVAL;
233}
234
235#if 0
236static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
237{
238	u32 i;
239
240	dev_info(adev->dev, "GPU pci config reset\n");
241
242	/* disable BM */
243	pci_clear_master(adev->pdev);
244	/* reset */
245	amdgpu_pci_config_reset(adev);
246
247	udelay(100);
248
249	/* wait for asic to come out of reset */
250	for (i = 0; i < adev->usec_timeout; i++) {
251		u32 memsize = nbio_v2_3_get_memsize(adev);
252		if (memsize != 0xffffffff)
253			break;
254		udelay(1);
255	}
256
257}
258#endif
259
260static int nv_asic_mode1_reset(struct amdgpu_device *adev)
261{
262	u32 i;
263	int ret = 0;
264
265	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
266
267	dev_info(adev->dev, "GPU mode1 reset\n");
268
269	/* disable BM */
270	pci_clear_master(adev->pdev);
271
272	pci_save_state(adev->pdev);
273
274	ret = psp_gpu_reset(adev);
275	if (ret)
276		dev_err(adev->dev, "GPU mode1 reset failed\n");
277
278	pci_restore_state(adev->pdev);
279
280	/* wait for asic to come out of reset */
281	for (i = 0; i < adev->usec_timeout; i++) {
282		u32 memsize = adev->nbio_funcs->get_memsize(adev);
283
284		if (memsize != 0xffffffff)
285			break;
286		udelay(1);
287	}
288
289	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
290
291	return ret;
292}
293
294static enum amd_reset_method
295nv_asic_reset_method(struct amdgpu_device *adev)
296{
297	struct smu_context *smu = &adev->smu;
298
299	if (smu_baco_is_support(smu))
300		return AMD_RESET_METHOD_BACO;
301	else
302		return AMD_RESET_METHOD_MODE1;
303}
304
305static int nv_asic_reset(struct amdgpu_device *adev)
306{
307
308	/* FIXME: it doesn't work since vega10 */
309#if 0
310	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
311
312	nv_gpu_pci_config_reset(adev);
313
314	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
315#endif
316	int ret = 0;
317	struct smu_context *smu = &adev->smu;
318
319	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
320		if (!adev->in_suspend)
321			amdgpu_inc_vram_lost(adev);
322		ret = smu_baco_reset(smu);
323	} else {
324		if (!adev->in_suspend)
325			amdgpu_inc_vram_lost(adev);
326		ret = nv_asic_mode1_reset(adev);
327	}
328
329	return ret;
330}
331
332static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
333{
334	/* todo */
335	return 0;
336}
337
338static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
339{
340	/* todo */
341	return 0;
342}
343
344static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
345{
346	if (pci_is_root_bus(adev->pdev->bus))
347		return;
348
349	if (amdgpu_pcie_gen2 == 0)
350		return;
351
352	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
353					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
354		return;
355
356	/* todo */
357}
358
359static void nv_program_aspm(struct amdgpu_device *adev)
360{
361
362	if (amdgpu_aspm == 0)
363		return;
364
365	/* todo */
366}
367
368static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
369					bool enable)
370{
371	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
372	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
373}
374
375static const struct amdgpu_ip_block_version nv_common_ip_block =
376{
377	.type = AMD_IP_BLOCK_TYPE_COMMON,
378	.major = 1,
379	.minor = 0,
380	.rev = 0,
381	.funcs = &nv_common_ip_funcs,
382};
383
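/*
 * Initialize the per-IP register base offsets, preferring the IP discovery
 * table when amdgpu_discovery is enabled and falling back to the hardcoded
 * per-ASIC legacy tables.
 */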
384static int nv_reg_base_init(struct amdgpu_device *adev)
385{
386	int r;
387
388	if (amdgpu_discovery) {
389		r = amdgpu_discovery_reg_base_init(adev);
390		if (r) {
391			DRM_WARN("failed to init reg base from ip discovery table, "
392					"fallback to legacy init method\n");
393			goto legacy_init;
394		}
395
396		return 0;
397	}
398
399legacy_init:
400	switch (adev->asic_type) {
401	case CHIP_NAVI10:
402		navi10_reg_base_init(adev);
403		break;
404	case CHIP_NAVI14:
405		navi14_reg_base_init(adev);
406		break;
407	case CHIP_NAVI12:
408		navi12_reg_base_init(adev);
409		break;
410	default:
411		return -EINVAL;
412	}
413
414	return 0;
415}
416
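/*
 * Register the IP blocks (common, GMC, IH, PSP, SMU, display, GFX, SDMA,
 * VCN, MES) for each supported Navi1x ASIC once the register bases are
 * initialized.  SMU placement depends on the firmware load type, and display
 * falls back to virtual DCE under SR-IOV or when virtual display is
 * requested.
 */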
417int nv_set_ip_blocks(struct amdgpu_device *adev)
418{
419	int r;
420
421	/* Set IP register base before any HW register access */
422	r = nv_reg_base_init(adev);
423	if (r)
424		return r;
425
426	adev->nbio_funcs = &nbio_v2_3_funcs;
427
428	adev->nbio_funcs->detect_hw_virt(adev);
429
430	switch (adev->asic_type) {
431	case CHIP_NAVI10:
432	case CHIP_NAVI14:
433		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
434		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
435		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
436		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
437		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
438		    is_support_sw_smu(adev))
439			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
440		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
441			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
442#if defined(CONFIG_DRM_AMD_DC)
443		else if (amdgpu_device_has_dc_support(adev))
444			amdgpu_device_ip_block_add(adev, &dm_ip_block);
445#endif
446		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
447		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
448		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
449		    is_support_sw_smu(adev))
450			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
451		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
452		if (adev->enable_mes)
453			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
454		break;
455	case CHIP_NAVI12:
456		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
457		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
458		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
459		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
460		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
461		    is_support_sw_smu(adev))
462			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
463		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
464			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
465#if defined(CONFIG_DRM_AMD_DC)
466		else if (amdgpu_device_has_dc_support(adev))
467			amdgpu_device_ip_block_add(adev, &dm_ip_block);
468#endif
469		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
470		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
471		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
472		    is_support_sw_smu(adev))
473			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
474		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
475		break;
476	default:
477		return -EINVAL;
478	}
479
480	return 0;
481}
482
483static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
484{
485	return adev->nbio_funcs->get_rev_id(adev);
486}
487
488static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
489{
490	adev->nbio_funcs->hdp_flush(adev, ring);
491}
492
493static void nv_invalidate_hdp(struct amdgpu_device *adev,
494				struct amdgpu_ring *ring)
495{
496	if (!ring || !ring->funcs->emit_wreg) {
497		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
498	} else {
499		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
500					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
501	}
502}
503
504static bool nv_need_full_reset(struct amdgpu_device *adev)
505{
506	return true;
507}
508
509static void nv_get_pcie_usage(struct amdgpu_device *adev,
510			      uint64_t *count0,
511			      uint64_t *count1)
512{
513	/*TODO*/
514}
515
516static bool nv_need_reset_on_init(struct amdgpu_device *adev)
517{
518#if 0
519	u32 sol_reg;
520
521	if (adev->flags & AMD_IS_APU)
522		return false;
523
524	/* Check sOS sign of life register to confirm sys driver and sOS
 525	 * have already been loaded.
526	 */
527	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
528	if (sol_reg)
529		return true;
530#endif
531	/* TODO: re-enable it when mode1 reset is functional */
532	return false;
533}
534
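/*
 * Program the Navi10 doorbell index layout: KIQ, MEC compute rings,
 * user queues, GFX rings, SDMA engines, IH and the VCN rings.
 */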
535static void nv_init_doorbell_index(struct amdgpu_device *adev)
536{
537	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
538	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
539	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
540	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
541	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
542	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
543	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
544	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
545	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
546	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
547	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
548	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
549	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
550	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
551	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
552	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
553	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
554	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
555	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
556	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
557	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
558	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;
559
560	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
561	adev->doorbell_index.sdma_doorbell_range = 20;
562}
563
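/* ASIC-level callbacks shared by all Navi1x parts. */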
564static const struct amdgpu_asic_funcs nv_asic_funcs =
565{
566	.read_disabled_bios = &nv_read_disabled_bios,
567	.read_bios_from_rom = &nv_read_bios_from_rom,
568	.read_register = &nv_read_register,
569	.reset = &nv_asic_reset,
570	.reset_method = &nv_asic_reset_method,
571	.set_vga_state = &nv_vga_set_state,
572	.get_xclk = &nv_get_xclk,
573	.set_uvd_clocks = &nv_set_uvd_clocks,
574	.set_vce_clocks = &nv_set_vce_clocks,
575	.get_config_memsize = &nv_get_config_memsize,
576	.flush_hdp = &nv_flush_hdp,
577	.invalidate_hdp = &nv_invalidate_hdp,
578	.init_doorbell_index = &nv_init_doorbell_index,
579	.need_full_reset = &nv_need_full_reset,
580	.get_pcie_usage = &nv_get_pcie_usage,
581	.need_reset_on_init = &nv_need_reset_on_init,
582};
583
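/*
 * Early init: hook up the indirect register accessors, install the ASIC
 * callbacks and select the clock/power gating features supported by the
 * detected chip.
 */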
584static int nv_common_early_init(void *handle)
585{
586	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
587
588	adev->smc_rreg = NULL;
589	adev->smc_wreg = NULL;
590	adev->pcie_rreg = &nv_pcie_rreg;
591	adev->pcie_wreg = &nv_pcie_wreg;
592
593	/* TODO: these will be added with the VCN v2 implementation */
594	adev->uvd_ctx_rreg = NULL;
595	adev->uvd_ctx_wreg = NULL;
596
597	adev->didt_rreg = &nv_didt_rreg;
598	adev->didt_wreg = &nv_didt_wreg;
599
600	adev->asic_funcs = &nv_asic_funcs;
601
602	adev->rev_id = nv_get_rev_id(adev);
603	adev->external_rev_id = 0xff;
604	switch (adev->asic_type) {
605	case CHIP_NAVI10:
606		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
607			AMD_CG_SUPPORT_GFX_CGCG |
608			AMD_CG_SUPPORT_IH_CG |
609			AMD_CG_SUPPORT_HDP_MGCG |
610			AMD_CG_SUPPORT_HDP_LS |
611			AMD_CG_SUPPORT_SDMA_MGCG |
612			AMD_CG_SUPPORT_SDMA_LS |
613			AMD_CG_SUPPORT_MC_MGCG |
614			AMD_CG_SUPPORT_MC_LS |
615			AMD_CG_SUPPORT_ATHUB_MGCG |
616			AMD_CG_SUPPORT_ATHUB_LS |
617			AMD_CG_SUPPORT_VCN_MGCG |
618			AMD_CG_SUPPORT_BIF_MGCG |
619			AMD_CG_SUPPORT_BIF_LS;
620		adev->pg_flags = AMD_PG_SUPPORT_VCN |
621			AMD_PG_SUPPORT_VCN_DPG |
622			AMD_PG_SUPPORT_ATHUB;
623		adev->external_rev_id = adev->rev_id + 0x1;
624		break;
625	case CHIP_NAVI14:
626		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
627			AMD_CG_SUPPORT_GFX_CGCG |
628			AMD_CG_SUPPORT_IH_CG |
629			AMD_CG_SUPPORT_HDP_MGCG |
630			AMD_CG_SUPPORT_HDP_LS |
631			AMD_CG_SUPPORT_SDMA_MGCG |
632			AMD_CG_SUPPORT_SDMA_LS |
633			AMD_CG_SUPPORT_MC_MGCG |
634			AMD_CG_SUPPORT_MC_LS |
635			AMD_CG_SUPPORT_ATHUB_MGCG |
636			AMD_CG_SUPPORT_ATHUB_LS |
637			AMD_CG_SUPPORT_VCN_MGCG |
638			AMD_CG_SUPPORT_BIF_MGCG |
639			AMD_CG_SUPPORT_BIF_LS;
640		adev->pg_flags = AMD_PG_SUPPORT_VCN |
641			AMD_PG_SUPPORT_VCN_DPG;
642		adev->external_rev_id = adev->rev_id + 20;
643		break;
644	case CHIP_NAVI12:
645		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
646			AMD_CG_SUPPORT_GFX_MGLS |
647			AMD_CG_SUPPORT_GFX_CGCG |
648			AMD_CG_SUPPORT_GFX_CP_LS |
649			AMD_CG_SUPPORT_GFX_RLC_LS |
650			AMD_CG_SUPPORT_IH_CG |
651			AMD_CG_SUPPORT_HDP_MGCG |
652			AMD_CG_SUPPORT_HDP_LS |
653			AMD_CG_SUPPORT_SDMA_MGCG |
654			AMD_CG_SUPPORT_SDMA_LS |
655			AMD_CG_SUPPORT_MC_MGCG |
656			AMD_CG_SUPPORT_MC_LS |
657			AMD_CG_SUPPORT_ATHUB_MGCG |
658			AMD_CG_SUPPORT_ATHUB_LS |
659			AMD_CG_SUPPORT_VCN_MGCG;
660		adev->pg_flags = AMD_PG_SUPPORT_VCN |
661			AMD_PG_SUPPORT_VCN_DPG |
662			AMD_PG_SUPPORT_ATHUB;
663		adev->external_rev_id = adev->rev_id + 0xa;
664		break;
665	default:
666		/* FIXME: not supported yet */
667		return -EINVAL;
668	}
669
670	return 0;
671}
672
673static int nv_common_late_init(void *handle)
674{
675	return 0;
676}
677
678static int nv_common_sw_init(void *handle)
679{
680	return 0;
681}
682
683static int nv_common_sw_fini(void *handle)
684{
685	return 0;
686}
687
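/*
 * Common hardware init: bring up the PCIe link, program ASPM and the NBIO
 * registers, then open the doorbell aperture.
 */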
688static int nv_common_hw_init(void *handle)
689{
690	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
691
692	/* enable pcie gen2/3 link */
693	nv_pcie_gen3_enable(adev);
694	/* enable aspm */
695	nv_program_aspm(adev);
696	/* setup nbio registers */
697	adev->nbio_funcs->init_registers(adev);
698	/* enable the doorbell aperture */
699	nv_enable_doorbell_aperture(adev, true);
700
701	return 0;
702}
703
704static int nv_common_hw_fini(void *handle)
705{
706	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
707
708	/* disable the doorbell aperture */
709	nv_enable_doorbell_aperture(adev, false);
710
711	return 0;
712}
713
714static int nv_common_suspend(void *handle)
715{
716	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
717
718	return nv_common_hw_fini(adev);
719}
720
721static int nv_common_resume(void *handle)
722{
723	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
724
725	return nv_common_hw_init(adev);
726}
727
728static bool nv_common_is_idle(void *handle)
729{
730	return true;
731}
732
733static int nv_common_wait_for_idle(void *handle)
734{
735	return 0;
736}
737
738static int nv_common_soft_reset(void *handle)
739{
740	return 0;
741}
742
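/*
 * HDP 5.0 memory power gating: force the IPH and RC memory clocks on,
 * disable every power mode, enable exactly one of LS/DS/SD according to
 * the cg_flags, then restore the original clock overrides.
 */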
743static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
744					   bool enable)
745{
746	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
747	uint32_t hdp_mem_pwr_cntl;
748
749	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
750				AMD_CG_SUPPORT_HDP_DS |
751				AMD_CG_SUPPORT_HDP_SD)))
752		return;
753
754	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
755	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
756
757	/* Before doing the clock/power mode switch,
758	 * force on the IPH & RC clocks */
759	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
760				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
761	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
762				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
763	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
764
765	/* HDP 5.0 doesn't support dynamic power mode switching,
766	 * disable clock and power gating before making any changes */
767	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
768					 IPH_MEM_POWER_CTRL_EN, 0);
769	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
770					 IPH_MEM_POWER_LS_EN, 0);
771	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
772					 IPH_MEM_POWER_DS_EN, 0);
773	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
774					 IPH_MEM_POWER_SD_EN, 0);
775	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
776					 RC_MEM_POWER_CTRL_EN, 0);
777	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
778					 RC_MEM_POWER_LS_EN, 0);
779	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
780					 RC_MEM_POWER_DS_EN, 0);
781	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
782					 RC_MEM_POWER_SD_EN, 0);
783	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
784
785	/* only one clock gating mode (LS/DS/SD) can be enabled */
786	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
787		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
788						 HDP_MEM_POWER_CTRL,
789						 IPH_MEM_POWER_LS_EN, enable);
790		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
791						 HDP_MEM_POWER_CTRL,
792						 RC_MEM_POWER_LS_EN, enable);
793	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
794		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
795						 HDP_MEM_POWER_CTRL,
796						 IPH_MEM_POWER_DS_EN, enable);
797		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
798						 HDP_MEM_POWER_CTRL,
799						 RC_MEM_POWER_DS_EN, enable);
800	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
801		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
802						 HDP_MEM_POWER_CTRL,
803						 IPH_MEM_POWER_SD_EN, enable);
804		/* RC should not use shutdown mode, fall back to DS */
805		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
806						 HDP_MEM_POWER_CTRL,
807						 RC_MEM_POWER_DS_EN, enable);
808	}
809
810	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
811
812	/* restore the IPH & RC clock overrides after the clock/power mode change */
813	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
814}
815
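/*
 * HDP medium grain clock gating: clear the soft clock overrides to let the
 * clocks gate dynamically, or set them to force the clocks on.
 */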
816static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
817				       bool enable)
818{
819	uint32_t hdp_clk_cntl;
820
821	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
822		return;
823
824	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
825
826	if (enable) {
827		hdp_clk_cntl &=
828			~(uint32_t)
829			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
830			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
831			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
832			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
833			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
834			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
835	} else {
836		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
837			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
838			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
839			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
840			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
841			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
842	}
843
844	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
845}
846
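/*
 * Propagate the requested clockgating state to the NBIO and HDP blocks.
 * Skipped entirely when running as an SR-IOV virtual function.
 */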
847static int nv_common_set_clockgating_state(void *handle,
848					   enum amd_clockgating_state state)
849{
850	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
851
852	if (amdgpu_sriov_vf(adev))
853		return 0;
854
855	switch (adev->asic_type) {
856	case CHIP_NAVI10:
857	case CHIP_NAVI14:
858	case CHIP_NAVI12:
859		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
860				state == AMD_CG_STATE_GATE);
861		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
862				state == AMD_CG_STATE_GATE);
863		nv_update_hdp_mem_power_gating(adev,
864				   state == AMD_CG_STATE_GATE);
865		nv_update_hdp_clock_gating(adev,
866				state == AMD_CG_STATE_GATE);
867		break;
868	default:
869		break;
870	}
871	return 0;
872}
873
874static int nv_common_set_powergating_state(void *handle,
875					   enum amd_powergating_state state)
876{
877	/* TODO */
878	return 0;
879}
880
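/*
 * Report the currently active clockgating features by querying the NBIO
 * block and decoding the HDP clock and memory power registers.
 */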
881static void nv_common_get_clockgating_state(void *handle, u32 *flags)
882{
883	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
884	uint32_t tmp;
885
886	if (amdgpu_sriov_vf(adev))
887		*flags = 0;
888
889	adev->nbio_funcs->get_clockgating_state(adev, flags);
890
891	/* AMD_CG_SUPPORT_HDP_MGCG */
892	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
893	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
894		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
895		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
896		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
897		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
898		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
899		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
900
901	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
902	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
903	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
904		*flags |= AMD_CG_SUPPORT_HDP_LS;
905	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
906		*flags |= AMD_CG_SUPPORT_HDP_DS;
907	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
908		*flags |= AMD_CG_SUPPORT_HDP_SD;
909
911}
912
913static const struct amd_ip_funcs nv_common_ip_funcs = {
914	.name = "nv_common",
915	.early_init = nv_common_early_init,
916	.late_init = nv_common_late_init,
917	.sw_init = nv_common_sw_init,
918	.sw_fini = nv_common_sw_fini,
919	.hw_init = nv_common_hw_init,
920	.hw_fini = nv_common_hw_fini,
921	.suspend = nv_common_suspend,
922	.resume = nv_common_resume,
923	.is_idle = nv_common_is_idle,
924	.wait_for_idle = nv_common_wait_for_idle,
925	.soft_reset = nv_common_soft_reset,
926	.set_clockgating_state = nv_common_set_clockgating_state,
927	.set_powergating_state = nv_common_set_powergating_state,
928	.get_clockgating_state = nv_common_get_clockgating_state,
929};