v6.9.4
   1/*
   2 * Copyright 2018 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25
  26#include "amdgpu.h"
  27#include "amdgpu_discovery.h"
  28#include "soc15_hw_ip.h"
  29#include "discovery.h"
  30#include "amdgpu_ras.h"
  31
  32#include "soc15.h"
  33#include "gfx_v9_0.h"
  34#include "gfx_v9_4_3.h"
  35#include "gmc_v9_0.h"
  36#include "df_v1_7.h"
  37#include "df_v3_6.h"
  38#include "df_v4_3.h"
  39#include "df_v4_6_2.h"
  40#include "nbio_v6_1.h"
  41#include "nbio_v7_0.h"
  42#include "nbio_v7_4.h"
  43#include "nbio_v7_9.h"
  44#include "nbio_v7_11.h"
  45#include "hdp_v4_0.h"
  46#include "vega10_ih.h"
  47#include "vega20_ih.h"
  48#include "sdma_v4_0.h"
  49#include "sdma_v4_4_2.h"
  50#include "uvd_v7_0.h"
  51#include "vce_v4_0.h"
  52#include "vcn_v1_0.h"
  53#include "vcn_v2_5.h"
  54#include "jpeg_v2_5.h"
  55#include "smuio_v9_0.h"
  56#include "gmc_v10_0.h"
  57#include "gmc_v11_0.h"
  58#include "gfxhub_v2_0.h"
  59#include "mmhub_v2_0.h"
  60#include "nbio_v2_3.h"
  61#include "nbio_v4_3.h"
  62#include "nbio_v7_2.h"
  63#include "nbio_v7_7.h"
  64#include "nbif_v6_3_1.h"
  65#include "hdp_v5_0.h"
  66#include "hdp_v5_2.h"
  67#include "hdp_v6_0.h"
  68#include "hdp_v7_0.h"
  69#include "nv.h"
  70#include "soc21.h"
  71#include "navi10_ih.h"
  72#include "ih_v6_0.h"
  73#include "ih_v6_1.h"
  74#include "ih_v7_0.h"
  75#include "gfx_v10_0.h"
  76#include "gfx_v11_0.h"
  77#include "sdma_v5_0.h"
  78#include "sdma_v5_2.h"
  79#include "sdma_v6_0.h"
  80#include "lsdma_v6_0.h"
  81#include "lsdma_v7_0.h"
  82#include "vcn_v2_0.h"
  83#include "jpeg_v2_0.h"
  84#include "vcn_v3_0.h"
  85#include "jpeg_v3_0.h"
  86#include "vcn_v4_0.h"
  87#include "jpeg_v4_0.h"
  88#include "vcn_v4_0_3.h"
  89#include "jpeg_v4_0_3.h"
  90#include "vcn_v4_0_5.h"
  91#include "jpeg_v4_0_5.h"
  92#include "amdgpu_vkms.h"
  93#include "mes_v10_1.h"
  94#include "mes_v11_0.h"
  95#include "smuio_v11_0.h"
  96#include "smuio_v11_0_6.h"
  97#include "smuio_v13_0.h"
  98#include "smuio_v13_0_3.h"
  99#include "smuio_v13_0_6.h"
 100#include "vcn_v5_0_0.h"
 101#include "jpeg_v5_0_0.h"
 102
 103#include "amdgpu_vpe.h"
 104
 105#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
 106MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
 107
 108#define mmIP_DISCOVERY_VERSION  0x16A00
 109#define mmRCC_CONFIG_MEMSIZE	0xde3
 110#define mmMP0_SMN_C2PMSG_33	0x16061
 111#define mmMM_INDEX		0x0
 112#define mmMM_INDEX_HI		0x6
 113#define mmMM_DATA		0x1
 114
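/*
 * Human-readable names for the hardware IDs found in the discovery table,
 * indexed by HW ID.  Used below for the per-IP sysfs links and debug output.
 */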
 115static const char *hw_id_names[HW_ID_MAX] = {
 116	[MP1_HWID]		= "MP1",
 117	[MP2_HWID]		= "MP2",
 118	[THM_HWID]		= "THM",
 119	[SMUIO_HWID]		= "SMUIO",
 120	[FUSE_HWID]		= "FUSE",
 121	[CLKA_HWID]		= "CLKA",
 122	[PWR_HWID]		= "PWR",
 123	[GC_HWID]		= "GC",
 124	[UVD_HWID]		= "UVD",
 125	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
 126	[ACP_HWID]		= "ACP",
 127	[DCI_HWID]		= "DCI",
 128	[DMU_HWID]		= "DMU",
 129	[DCO_HWID]		= "DCO",
 130	[DIO_HWID]		= "DIO",
 131	[XDMA_HWID]		= "XDMA",
 132	[DCEAZ_HWID]		= "DCEAZ",
 133	[DAZ_HWID]		= "DAZ",
 134	[SDPMUX_HWID]		= "SDPMUX",
 135	[NTB_HWID]		= "NTB",
 136	[IOHC_HWID]		= "IOHC",
 137	[L2IMU_HWID]		= "L2IMU",
 138	[VCE_HWID]		= "VCE",
 139	[MMHUB_HWID]		= "MMHUB",
 140	[ATHUB_HWID]		= "ATHUB",
 141	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
 142	[DFX_HWID]		= "DFX",
 143	[DBGU0_HWID]		= "DBGU0",
 144	[DBGU1_HWID]		= "DBGU1",
 145	[OSSSYS_HWID]		= "OSSSYS",
 146	[HDP_HWID]		= "HDP",
 147	[SDMA0_HWID]		= "SDMA0",
 148	[SDMA1_HWID]		= "SDMA1",
 149	[SDMA2_HWID]		= "SDMA2",
 150	[SDMA3_HWID]		= "SDMA3",
 151	[LSDMA_HWID]		= "LSDMA",
 152	[ISP_HWID]		= "ISP",
 153	[DBGU_IO_HWID]		= "DBGU_IO",
 154	[DF_HWID]		= "DF",
 155	[CLKB_HWID]		= "CLKB",
 156	[FCH_HWID]		= "FCH",
 157	[DFX_DAP_HWID]		= "DFX_DAP",
 158	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
 159	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
 160	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
 161	[L1IMU3_HWID]		= "L1IMU3",
 162	[L1IMU4_HWID]		= "L1IMU4",
 163	[L1IMU5_HWID]		= "L1IMU5",
 164	[L1IMU6_HWID]		= "L1IMU6",
 165	[L1IMU7_HWID]		= "L1IMU7",
 166	[L1IMU8_HWID]		= "L1IMU8",
 167	[L1IMU9_HWID]		= "L1IMU9",
 168	[L1IMU10_HWID]		= "L1IMU10",
 169	[L1IMU11_HWID]		= "L1IMU11",
 170	[L1IMU12_HWID]		= "L1IMU12",
 171	[L1IMU13_HWID]		= "L1IMU13",
 172	[L1IMU14_HWID]		= "L1IMU14",
 173	[L1IMU15_HWID]		= "L1IMU15",
 174	[WAFLC_HWID]		= "WAFLC",
 175	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
 176	[PCIE_HWID]		= "PCIE",
 177	[PCS_HWID]		= "PCS",
 178	[DDCL_HWID]		= "DDCL",
 179	[SST_HWID]		= "SST",
 180	[IOAGR_HWID]		= "IOAGR",
 181	[NBIF_HWID]		= "NBIF",
 182	[IOAPIC_HWID]		= "IOAPIC",
 183	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
 184	[NTBCCP_HWID]		= "NTBCCP",
 185	[UMC_HWID]		= "UMC",
 186	[SATA_HWID]		= "SATA",
 187	[USB_HWID]		= "USB",
 188	[CCXSEC_HWID]		= "CCXSEC",
 189	[XGMI_HWID]		= "XGMI",
 190	[XGBE_HWID]		= "XGBE",
 191	[MP0_HWID]		= "MP0",
 192	[VPE_HWID]		= "VPE",
 193};
 194
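/*
 * Map the driver's internal HWIP enumeration onto the hardware IDs used by
 * the discovery table.  Entries left at zero have no discovery counterpart
 * and are skipped when register bases are assigned.
 */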
 195static int hw_id_map[MAX_HWIP] = {
 196	[GC_HWIP]	= GC_HWID,
 197	[HDP_HWIP]	= HDP_HWID,
 198	[SDMA0_HWIP]	= SDMA0_HWID,
 199	[SDMA1_HWIP]	= SDMA1_HWID,
 200	[SDMA2_HWIP]    = SDMA2_HWID,
 201	[SDMA3_HWIP]    = SDMA3_HWID,
 202	[LSDMA_HWIP]    = LSDMA_HWID,
 203	[MMHUB_HWIP]	= MMHUB_HWID,
 204	[ATHUB_HWIP]	= ATHUB_HWID,
 205	[NBIO_HWIP]	= NBIF_HWID,
 206	[MP0_HWIP]	= MP0_HWID,
 207	[MP1_HWIP]	= MP1_HWID,
 208	[UVD_HWIP]	= UVD_HWID,
 209	[VCE_HWIP]	= VCE_HWID,
 210	[DF_HWIP]	= DF_HWID,
 211	[DCE_HWIP]	= DMU_HWID,
 212	[OSSSYS_HWIP]	= OSSSYS_HWID,
 213	[SMUIO_HWIP]	= SMUIO_HWID,
 214	[PWR_HWIP]	= PWR_HWID,
 215	[NBIF_HWIP]	= NBIF_HWID,
 216	[THM_HWIP]	= THM_HWID,
 217	[CLK_HWIP]	= CLKA_HWID,
 218	[UMC_HWIP]	= UMC_HWID,
 219	[XGMI_HWIP]	= XGMI_HWID,
 220	[DCI_HWIP]	= DCI_HWID,
 221	[PCIE_HWIP]	= PCIE_HWID,
 222	[VPE_HWIP]	= VPE_HWID,
 223};
 224
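/*
 * Read the discovery binary from the TMR carve-out in system memory.  This
 * path is used by amdgpu_discovery_read_binary_from_mem() when the
 * RCC_CONFIG_MEMSIZE register reports no VRAM.
 */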
 225static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
 226{
 227	u64 tmr_offset, tmr_size, pos;
 228	void *discv_regn;
 229	int ret;
 230
 231	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
 232	if (ret)
 233		return ret;
 234
 235	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
 236
 237	/* This region is read-only and reserved from system use */
 238	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
 239	if (discv_regn) {
 240		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
 241		memunmap(discv_regn);
 242		return 0;
 243	}
 244
 245	return -ENOENT;
 246}
 247
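/*
 * Read the discovery binary from the device: it lives DISCOVERY_TMR_OFFSET
 * bytes from the top of VRAM, or in the system-memory TMR when no VRAM is
 * reported.
 */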
 248static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
 249						 uint8_t *binary)
 250{
 251	uint64_t vram_size;
 252	u32 msg;
 253	int i, ret = 0;
 254
 255	/* It can take up to a second for IFWI init to complete on some dGPUs,
 256	 * but generally it should be in the 60-100ms range.  Normally this starts
 257	 * as soon as the device gets power so by the time the OS loads this has long
 258	 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
 259	 * wait for this to complete.  Once the C2PMSG is updated, we can
 260	 * continue.
 261	 */
 262	if (dev_is_removable(&adev->pdev->dev)) {
 263		for (i = 0; i < 1000; i++) {
 264			msg = RREG32(mmMP0_SMN_C2PMSG_33);
 265			if (msg & 0x80000000)
 266				break;
 267			msleep(1);
 268		}
 269	}
 270	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
 271
 272	if (vram_size) {
 273		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
 274		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
 275					  adev->mman.discovery_tmr_size, false);
 276	} else {
 277		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
 278	}
 279
 280	return ret;
 281}
 282
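/*
 * amdgpu_discovery=2 overrides the on-device table with the contents of the
 * amdgpu/ip_discovery.bin firmware file (typically for bring-up and
 * debugging).
 */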
 283static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
 284{
 285	const struct firmware *fw;
 286	const char *fw_name;
 287	int r;
 288
 289	switch (amdgpu_discovery) {
 290	case 2:
 291		fw_name = FIRMWARE_IP_DISCOVERY;
 292		break;
 293	default:
 294		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
 295		return -EINVAL;
 296	}
 297
 298	r = request_firmware(&fw, fw_name, adev->dev);
 299	if (r) {
 300		dev_err(adev->dev, "can't load firmware \"%s\"\n",
 301			fw_name);
 302		return r;
 303	}
 304
 305	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
 306	release_firmware(fw);
 307
 308	return 0;
 309}
 310
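/*
 * The discovery tables are protected by a simple additive checksum: a 16-bit
 * sum of the payload bytes, compared against the value stored in the table
 * header.
 */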
 311static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
 312{
 313	uint16_t checksum = 0;
 314	int i;
 315
 316	for (i = 0; i < size; i++)
 317		checksum += data[i];
 318
 319	return checksum;
 320}
 321
 322static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
 323						    uint16_t expected)
 324{
 325	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
 326}
 327
 328static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
 329{
 330	struct binary_header *bhdr;
 331	bhdr = (struct binary_header *)binary;
 332
 333	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
 334}
 335
 336static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
 337{
 338	/*
 339	 * So far, apply this quirk only on those Navy Flounder boards which
  340	 * have a bad VCN config in their harvest table.
 341	 */
 342	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
 343	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
 344		switch (adev->pdev->revision) {
 345		case 0xC1:
 346		case 0xC2:
 347		case 0xC3:
 348		case 0xC5:
 349		case 0xC7:
 350		case 0xCF:
 351		case 0xDF:
 352			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
 353			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
 354			break;
 355		default:
 356			break;
 357		}
 358	}
 359}
 360
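/*
 * Read the discovery binary into adev->mman.discovery_bin and validate the
 * binary signature as well as the signatures/checksums of the IP discovery,
 * GC, harvest, VCN and MALL sub-tables before anything else parses them.
 */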
 361static int amdgpu_discovery_init(struct amdgpu_device *adev)
 362{
 363	struct table_info *info;
 364	struct binary_header *bhdr;
 365	uint16_t offset;
 366	uint16_t size;
 367	uint16_t checksum;
 368	int r;
 369
 370	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
 371	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
 372	if (!adev->mman.discovery_bin)
 373		return -ENOMEM;
 374
 375	/* Read from file if it is the preferred option */
 376	if (amdgpu_discovery == 2) {
 377		dev_info(adev->dev, "use ip discovery information from file");
 378		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
 379
 380		if (r) {
 381			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
 382			r = -EINVAL;
 383			goto out;
 384		}
 385
 386	} else {
 387		r = amdgpu_discovery_read_binary_from_mem(
 388			adev, adev->mman.discovery_bin);
 389		if (r)
 390			goto out;
 391	}
 392
 393	/* check the ip discovery binary signature */
 394	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
 395		dev_err(adev->dev,
 396			"get invalid ip discovery binary signature\n");
 397		r = -EINVAL;
 398		goto out;
 399	}
 400
 401	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 402
 403	offset = offsetof(struct binary_header, binary_checksum) +
 404		sizeof(bhdr->binary_checksum);
 405	size = le16_to_cpu(bhdr->binary_size) - offset;
 406	checksum = le16_to_cpu(bhdr->binary_checksum);
 407
 408	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 409					      size, checksum)) {
 410		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
 411		r = -EINVAL;
 412		goto out;
 413	}
 414
 415	info = &bhdr->table_list[IP_DISCOVERY];
 416	offset = le16_to_cpu(info->offset);
 417	checksum = le16_to_cpu(info->checksum);
 418
 419	if (offset) {
 420		struct ip_discovery_header *ihdr =
 421			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
 422		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
 423			dev_err(adev->dev, "invalid ip discovery data table signature\n");
 424			r = -EINVAL;
 425			goto out;
 426		}
 427
 428		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 429						      le16_to_cpu(ihdr->size), checksum)) {
 430			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
 431			r = -EINVAL;
 432			goto out;
 433		}
 434	}
 435
 436	info = &bhdr->table_list[GC];
 437	offset = le16_to_cpu(info->offset);
 438	checksum = le16_to_cpu(info->checksum);
 439
 440	if (offset) {
 441		struct gpu_info_header *ghdr =
 442			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
 443
 444		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
 445			dev_err(adev->dev, "invalid ip discovery gc table id\n");
 446			r = -EINVAL;
 447			goto out;
 448		}
 449
 450		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 451						      le32_to_cpu(ghdr->size), checksum)) {
 452			dev_err(adev->dev, "invalid gc data table checksum\n");
 453			r = -EINVAL;
 454			goto out;
 455		}
 456	}
 457
 458	info = &bhdr->table_list[HARVEST_INFO];
 459	offset = le16_to_cpu(info->offset);
 460	checksum = le16_to_cpu(info->checksum);
 461
 462	if (offset) {
 463		struct harvest_info_header *hhdr =
 464			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
 465
 466		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
 467			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
 468			r = -EINVAL;
 469			goto out;
 470		}
 471
 472		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 473						      sizeof(struct harvest_table), checksum)) {
 474			dev_err(adev->dev, "invalid harvest data table checksum\n");
 475			r = -EINVAL;
 476			goto out;
 477		}
 478	}
 479
 480	info = &bhdr->table_list[VCN_INFO];
 481	offset = le16_to_cpu(info->offset);
 482	checksum = le16_to_cpu(info->checksum);
 483
 484	if (offset) {
 485		struct vcn_info_header *vhdr =
 486			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
 487
 488		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
 489			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
 490			r = -EINVAL;
 491			goto out;
 492		}
 493
 494		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 495						      le32_to_cpu(vhdr->size_bytes), checksum)) {
 496			dev_err(adev->dev, "invalid vcn data table checksum\n");
 497			r = -EINVAL;
 498			goto out;
 499		}
 500	}
 501
 502	info = &bhdr->table_list[MALL_INFO];
 503	offset = le16_to_cpu(info->offset);
 504	checksum = le16_to_cpu(info->checksum);
 505
  506	if (offset) {
 507		struct mall_info_header *mhdr =
 508			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
 509
 510		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
 511			dev_err(adev->dev, "invalid ip discovery mall table id\n");
 512			r = -EINVAL;
 513			goto out;
 514		}
 515
 516		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 517						      le32_to_cpu(mhdr->size_bytes), checksum)) {
 518			dev_err(adev->dev, "invalid mall data table checksum\n");
 519			r = -EINVAL;
 520			goto out;
 521		}
 522	}
 523
 524	return 0;
 525
 526out:
 527	kfree(adev->mman.discovery_bin);
 528	adev->mman.discovery_bin = NULL;
 529	if ((amdgpu_discovery != 2) &&
 530	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
 531		amdgpu_ras_query_boot_status(adev, 4);
 532	return r;
 533}
 534
 535static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
 536
 537void amdgpu_discovery_fini(struct amdgpu_device *adev)
 538{
 539	amdgpu_discovery_sysfs_fini(adev);
 540	kfree(adev->mman.discovery_bin);
 541	adev->mman.discovery_bin = NULL;
 542}
 543
 544static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
 545{
 546	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
 547		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
 548			  ip->instance_number);
 549		return -EINVAL;
 550	}
 551	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
 552		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
 553			  le16_to_cpu(ip->hw_id));
 554		return -EINVAL;
 555	}
 556
 557	return 0;
 558}
 559
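/*
 * On ASICs without a usable harvest table the harvest state is encoded in
 * the per-IP "variant" field; walk every IP instance and record harvested
 * VCN and DMU (display) instances.
 */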
 560static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
 561						uint32_t *vcn_harvest_count)
 562{
 563	struct binary_header *bhdr;
 564	struct ip_discovery_header *ihdr;
 565	struct die_header *dhdr;
 566	struct ip_v4 *ip;
 567	uint16_t die_offset, ip_offset, num_dies, num_ips;
 568	int i, j;
 569
 570	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 571	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
 572			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
 573	num_dies = le16_to_cpu(ihdr->num_dies);
 574
 575	/* scan harvest bit of all IP data structures */
 576	for (i = 0; i < num_dies; i++) {
 577		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
 578		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
 579		num_ips = le16_to_cpu(dhdr->num_ips);
 580		ip_offset = die_offset + sizeof(*dhdr);
 581
 582		for (j = 0; j < num_ips; j++) {
 583			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
 584
 585			if (amdgpu_discovery_validate_ip(ip))
 586				goto next_ip;
 587
 588			if (le16_to_cpu(ip->variant) == 1) {
 589				switch (le16_to_cpu(ip->hw_id)) {
 590				case VCN_HWID:
 591					(*vcn_harvest_count)++;
 592					if (ip->instance_number == 0) {
 593						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
 594						adev->vcn.inst_mask &=
 595							~AMDGPU_VCN_HARVEST_VCN0;
 596						adev->jpeg.inst_mask &=
 597							~AMDGPU_VCN_HARVEST_VCN0;
 598					} else {
 599						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
 600						adev->vcn.inst_mask &=
 601							~AMDGPU_VCN_HARVEST_VCN1;
 602						adev->jpeg.inst_mask &=
 603							~AMDGPU_VCN_HARVEST_VCN1;
 604					}
 605					break;
 606				case DMU_HWID:
 607					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 608					break;
 609				default:
 610					break;
 611				}
 612			}
 613next_ip:
 614			if (ihdr->base_addr_64_bit)
 615				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
 616			else
 617				ip_offset += struct_size(ip, base_address, ip->num_base_address);
 618		}
 619	}
 620}
 621
 622static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
 623						     uint32_t *vcn_harvest_count,
 624						     uint32_t *umc_harvest_count)
 625{
 626	struct binary_header *bhdr;
 627	struct harvest_table *harvest_info;
 628	u16 offset;
 629	int i;
 630	uint32_t umc_harvest_config = 0;
 631
 632	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 633	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
 634
 635	if (!offset) {
 636		dev_err(adev->dev, "invalid harvest table offset\n");
 637		return;
 638	}
 639
 640	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
 641
 642	for (i = 0; i < 32; i++) {
 643		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
 644			break;
 645
 646		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
 647		case VCN_HWID:
 648			(*vcn_harvest_count)++;
 649			adev->vcn.harvest_config |=
 650				(1 << harvest_info->list[i].number_instance);
 651			adev->jpeg.harvest_config |=
 652				(1 << harvest_info->list[i].number_instance);
 653
 654			adev->vcn.inst_mask &=
 655				~(1U << harvest_info->list[i].number_instance);
 656			adev->jpeg.inst_mask &=
 657				~(1U << harvest_info->list[i].number_instance);
 658			break;
 659		case DMU_HWID:
 660			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 661			break;
 662		case UMC_HWID:
 663			umc_harvest_config |=
 664				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
 665			(*umc_harvest_count)++;
 666			break;
 667		case GC_HWID:
 668			adev->gfx.xcc_mask &=
 669				~(1U << harvest_info->list[i].number_instance);
 670			break;
 671		case SDMA0_HWID:
 672			adev->sdma.sdma_mask &=
 673				~(1U << harvest_info->list[i].number_instance);
 674			break;
 675		default:
 676			break;
 677		}
 678	}
 679
 680	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
 681				~umc_harvest_config;
 682}
 683
 684/* ================================================== */
 685
 686struct ip_hw_instance {
 687	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
 688
 689	int hw_id;
 690	u8  num_instance;
 691	u8  major, minor, revision;
 692	u8  harvest;
 693
 694	int num_base_addresses;
 695	u32 base_addr[] __counted_by(num_base_addresses);
 696};
 697
 698struct ip_hw_id {
 699	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
 700	int hw_id;
 701};
 702
 703struct ip_die_entry {
 704	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
 705	u16 num_ips;
 706};
 707
 708/* -------------------------------------------------- */
 709
 710struct ip_hw_instance_attr {
 711	struct attribute attr;
 712	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
 713};
 714
 715static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 716{
 717	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
 718}
 719
 720static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 721{
 722	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
 723}
 724
 725static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 726{
 727	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
 728}
 729
 730static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 731{
 732	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
 733}
 734
 735static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 736{
 737	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
 738}
 739
 740static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 741{
 742	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
 743}
 744
 745static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 746{
 747	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
 748}
 749
 750static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 751{
 752	ssize_t res, at;
 753	int ii;
 754
 755	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
  756		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
 757		 */
 758		if (at + 12 > PAGE_SIZE)
 759			break;
 760		res = sysfs_emit_at(buf, at, "0x%08X\n",
 761				    ip_hw_instance->base_addr[ii]);
 762		if (res <= 0)
 763			break;
 764		at += res;
 765	}
 766
 767	return res < 0 ? res : at;
 768}
 769
 770static struct ip_hw_instance_attr ip_hw_attr[] = {
 771	__ATTR_RO(hw_id),
 772	__ATTR_RO(num_instance),
 773	__ATTR_RO(major),
 774	__ATTR_RO(minor),
 775	__ATTR_RO(revision),
 776	__ATTR_RO(harvest),
 777	__ATTR_RO(num_base_addresses),
 778	__ATTR_RO(base_addr),
 779};
 780
 781static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
 782ATTRIBUTE_GROUPS(ip_hw_instance);
 783
 784#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
 785#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
 786
 787static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
 788					struct attribute *attr,
 789					char *buf)
 790{
 791	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
 792	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
 793
 794	if (!ip_hw_attr->show)
 795		return -EIO;
 796
 797	return ip_hw_attr->show(ip_hw_instance, buf);
 798}
 799
 800static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
 801	.show = ip_hw_instance_attr_show,
 802};
 803
 804static void ip_hw_instance_release(struct kobject *kobj)
 805{
 806	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
 807
 808	kfree(ip_hw_instance);
 809}
 810
 811static const struct kobj_type ip_hw_instance_ktype = {
 812	.release = ip_hw_instance_release,
 813	.sysfs_ops = &ip_hw_instance_sysfs_ops,
 814	.default_groups = ip_hw_instance_groups,
 815};
 816
 817/* -------------------------------------------------- */
 818
 819#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
 820
 821static void ip_hw_id_release(struct kobject *kobj)
 822{
 823	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
 824
 825	if (!list_empty(&ip_hw_id->hw_id_kset.list))
 826		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
 827	kfree(ip_hw_id);
 828}
 829
 830static const struct kobj_type ip_hw_id_ktype = {
 831	.release = ip_hw_id_release,
 832	.sysfs_ops = &kobj_sysfs_ops,
 833};
 834
 835/* -------------------------------------------------- */
 836
 837static void die_kobj_release(struct kobject *kobj);
 838static void ip_disc_release(struct kobject *kobj);
 839
 840struct ip_die_entry_attribute {
 841	struct attribute attr;
 842	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
 843};
 844
 845#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
 846
 847static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
 848{
 849	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
 850}
 851
 852/* If there are more ip_die_entry attrs, other than the number of IPs,
  853 * we can make this into an array of attrs, and then initialize
 854 * ip_die_entry_attrs in a loop.
 855 */
 856static struct ip_die_entry_attribute num_ips_attr =
 857	__ATTR_RO(num_ips);
 858
 859static struct attribute *ip_die_entry_attrs[] = {
 860	&num_ips_attr.attr,
 861	NULL,
 862};
 863ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
 864
 865#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
 866
 867static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
 868				      struct attribute *attr,
 869				      char *buf)
 870{
 871	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
 872	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
 873
 874	if (!ip_die_entry_attr->show)
 875		return -EIO;
 876
 877	return ip_die_entry_attr->show(ip_die_entry, buf);
 878}
 879
 880static void ip_die_entry_release(struct kobject *kobj)
 881{
 882	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
 883
 884	if (!list_empty(&ip_die_entry->ip_kset.list))
 885		DRM_ERROR("ip_die_entry->ip_kset is not empty");
 886	kfree(ip_die_entry);
 887}
 888
 889static const struct sysfs_ops ip_die_entry_sysfs_ops = {
 890	.show = ip_die_entry_attr_show,
 891};
 892
 893static const struct kobj_type ip_die_entry_ktype = {
 894	.release = ip_die_entry_release,
 895	.sysfs_ops = &ip_die_entry_sysfs_ops,
 896	.default_groups = ip_die_entry_groups,
 897};
 898
 899static const struct kobj_type die_kobj_ktype = {
 900	.release = die_kobj_release,
 901	.sysfs_ops = &kobj_sysfs_ops,
 902};
 903
 904static const struct kobj_type ip_discovery_ktype = {
 905	.release = ip_disc_release,
 906	.sysfs_ops = &kobj_sysfs_ops,
 907};
 908
 909struct ip_discovery_top {
 910	struct kobject kobj;    /* ip_discovery/ */
 911	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
 912	struct amdgpu_device *adev;
 913};
 914
 915static void die_kobj_release(struct kobject *kobj)
 916{
 917	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
 918						       struct ip_discovery_top,
 919						       die_kset);
 920	if (!list_empty(&ip_top->die_kset.list))
 921		DRM_ERROR("ip_top->die_kset is not empty");
 922}
 923
 924static void ip_disc_release(struct kobject *kobj)
 925{
 926	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
 927						       kobj);
 928	struct amdgpu_device *adev = ip_top->adev;
 929
 930	adev->ip_top = NULL;
 931	kfree(ip_top);
 932}
 933
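/*
 * Translate the instance masks collected while parsing the harvest
 * information into the per-instance harvest flag exposed through sysfs.
 */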
 934static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
 935						 uint16_t hw_id, uint8_t inst)
 936{
 937	uint8_t harvest = 0;
 938
 939	/* Until a uniform way is figured, get mask based on hwid */
 940	switch (hw_id) {
 941	case VCN_HWID:
 942		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
 943		break;
 944	case DMU_HWID:
 945		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
 946			harvest = 0x1;
 947		break;
 948	case UMC_HWID:
 949		/* TODO: It needs another parsing; for now, ignore.*/
 950		break;
 951	case GC_HWID:
 952		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
 953		break;
 954	case SDMA0_HWID:
 955		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
 956		break;
 957	default:
 958		break;
 959	}
 960
 961	return harvest;
 962}
 963
 964static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
 965				      struct ip_die_entry *ip_die_entry,
 966				      const size_t _ip_offset, const int num_ips,
 967				      bool reg_base_64)
 968{
 969	int ii, jj, kk, res;
 970
 971	DRM_DEBUG("num_ips:%d", num_ips);
 972
 973	/* Find all IPs of a given HW ID, and add their instance to
 974	 * #die/#hw_id/#instance/<attributes>
 975	 */
 976	for (ii = 0; ii < HW_ID_MAX; ii++) {
 977		struct ip_hw_id *ip_hw_id = NULL;
 978		size_t ip_offset = _ip_offset;
 979
 980		for (jj = 0; jj < num_ips; jj++) {
 981			struct ip_v4 *ip;
 982			struct ip_hw_instance *ip_hw_instance;
 983
 984			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
 985			if (amdgpu_discovery_validate_ip(ip) ||
 986			    le16_to_cpu(ip->hw_id) != ii)
 987				goto next_ip;
 988
 989			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
 990
 991			/* We have a hw_id match; register the hw
 992			 * block if not yet registered.
 993			 */
 994			if (!ip_hw_id) {
 995				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
 996				if (!ip_hw_id)
 997					return -ENOMEM;
 998				ip_hw_id->hw_id = ii;
 999
1000				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1001				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1002				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1003				res = kset_register(&ip_hw_id->hw_id_kset);
1004				if (res) {
1005					DRM_ERROR("Couldn't register ip_hw_id kset");
1006					kfree(ip_hw_id);
1007					return res;
1008				}
1009				if (hw_id_names[ii]) {
1010					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1011								&ip_hw_id->hw_id_kset.kobj,
1012								hw_id_names[ii]);
1013					if (res) {
1014						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1015							  hw_id_names[ii],
1016							  kobject_name(&ip_die_entry->ip_kset.kobj));
1017					}
1018				}
1019			}
1020
1021			/* Now register its instance.
1022			 */
1023			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1024							     base_addr,
1025							     ip->num_base_address),
1026						 GFP_KERNEL);
1027			if (!ip_hw_instance) {
1028				DRM_ERROR("no memory for ip_hw_instance");
1029				return -ENOMEM;
1030			}
1031			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1032			ip_hw_instance->num_instance = ip->instance_number;
1033			ip_hw_instance->major = ip->major;
1034			ip_hw_instance->minor = ip->minor;
1035			ip_hw_instance->revision = ip->revision;
1036			ip_hw_instance->harvest =
1037				amdgpu_discovery_get_harvest_info(
1038					adev, ip_hw_instance->hw_id,
1039					ip_hw_instance->num_instance);
1040			ip_hw_instance->num_base_addresses = ip->num_base_address;
1041
1042			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1043				if (reg_base_64)
1044					ip_hw_instance->base_addr[kk] =
1045						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1046				else
1047					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1048			}
1049
1050			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1051			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1052			res = kobject_add(&ip_hw_instance->kobj, NULL,
1053					  "%d", ip_hw_instance->num_instance);
1054next_ip:
1055			if (reg_base_64)
1056				ip_offset += struct_size(ip, base_address_64,
1057							 ip->num_base_address);
1058			else
1059				ip_offset += struct_size(ip, base_address,
1060							 ip->num_base_address);
1061		}
1062	}
1063
1064	return 0;
1065}
1066
1067static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1068{
1069	struct binary_header *bhdr;
1070	struct ip_discovery_header *ihdr;
1071	struct die_header *dhdr;
1072	struct kset *die_kset = &adev->ip_top->die_kset;
1073	u16 num_dies, die_offset, num_ips;
1074	size_t ip_offset;
1075	int ii, res;
1076
1077	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1078	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1079					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1080	num_dies = le16_to_cpu(ihdr->num_dies);
1081
1082	DRM_DEBUG("number of dies: %d\n", num_dies);
1083
1084	for (ii = 0; ii < num_dies; ii++) {
1085		struct ip_die_entry *ip_die_entry;
1086
1087		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1088		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1089		num_ips = le16_to_cpu(dhdr->num_ips);
1090		ip_offset = die_offset + sizeof(*dhdr);
1091
1092		/* Add the die to the kset.
1093		 *
1094		 * dhdr->die_id == ii, which was checked in
1095		 * amdgpu_discovery_reg_base_init().
1096		 */
1097
1098		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1099		if (!ip_die_entry)
1100			return -ENOMEM;
1101
1102		ip_die_entry->num_ips = num_ips;
1103
1104		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1105		ip_die_entry->ip_kset.kobj.kset = die_kset;
1106		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1107		res = kset_register(&ip_die_entry->ip_kset);
1108		if (res) {
1109			DRM_ERROR("Couldn't register ip_die_entry kset");
1110			kfree(ip_die_entry);
1111			return res;
1112		}
1113
1114		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1115	}
1116
1117	return 0;
1118}
1119
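/*
 * Expose the parsed table under the device's sysfs directory as
 * ip_discovery/die/#die/#hw_id/#instance/<attrs>, with a named symlink per
 * recognized hardware block.
 */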
1120static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1121{
1122	struct kset *die_kset;
1123	int res, ii;
1124
1125	if (!adev->mman.discovery_bin)
1126		return -EINVAL;
1127
1128	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1129	if (!adev->ip_top)
1130		return -ENOMEM;
1131
1132	adev->ip_top->adev = adev;
1133
1134	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1135				   &adev->dev->kobj, "ip_discovery");
1136	if (res) {
1137		DRM_ERROR("Couldn't init and add ip_discovery/");
1138		goto Err;
1139	}
1140
1141	die_kset = &adev->ip_top->die_kset;
1142	kobject_set_name(&die_kset->kobj, "%s", "die");
1143	die_kset->kobj.parent = &adev->ip_top->kobj;
1144	die_kset->kobj.ktype = &die_kobj_ktype;
1145	res = kset_register(&adev->ip_top->die_kset);
1146	if (res) {
1147		DRM_ERROR("Couldn't register die_kset");
1148		goto Err;
1149	}
1150
1151	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1152		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1153	ip_hw_instance_attrs[ii] = NULL;
1154
1155	res = amdgpu_discovery_sysfs_recurse(adev);
1156
1157	return res;
1158Err:
1159	kobject_put(&adev->ip_top->kobj);
1160	return res;
1161}
1162
1163/* -------------------------------------------------- */
1164
1165#define list_to_kobj(el) container_of(el, struct kobject, entry)
1166
1167static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1168{
1169	struct list_head *el, *tmp;
1170	struct kset *hw_id_kset;
1171
1172	hw_id_kset = &ip_hw_id->hw_id_kset;
1173	spin_lock(&hw_id_kset->list_lock);
1174	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1175		list_del_init(el);
1176		spin_unlock(&hw_id_kset->list_lock);
1177		/* kobject is embedded in ip_hw_instance */
1178		kobject_put(list_to_kobj(el));
1179		spin_lock(&hw_id_kset->list_lock);
1180	}
1181	spin_unlock(&hw_id_kset->list_lock);
1182	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1183}
1184
1185static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1186{
1187	struct list_head *el, *tmp;
1188	struct kset *ip_kset;
1189
1190	ip_kset = &ip_die_entry->ip_kset;
1191	spin_lock(&ip_kset->list_lock);
1192	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1193		list_del_init(el);
1194		spin_unlock(&ip_kset->list_lock);
1195		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1196		spin_lock(&ip_kset->list_lock);
1197	}
1198	spin_unlock(&ip_kset->list_lock);
1199	kobject_put(&ip_die_entry->ip_kset.kobj);
1200}
1201
1202static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1203{
1204	struct list_head *el, *tmp;
1205	struct kset *die_kset;
1206
1207	die_kset = &adev->ip_top->die_kset;
1208	spin_lock(&die_kset->list_lock);
1209	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1210		list_del_init(el);
1211		spin_unlock(&die_kset->list_lock);
1212		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1213		spin_lock(&die_kset->list_lock);
1214	}
1215	spin_unlock(&die_kset->list_lock);
1216	kobject_put(&adev->ip_top->die_kset.kobj);
1217	kobject_put(&adev->ip_top->kobj);
1218}
1219
1220/* ================================================== */
1221
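/*
 * Main parser: walk every die and every IP instance in the discovery table,
 * fill adev->reg_offset[] with the register base addresses and record the
 * per-IP versions and instance masks/counts used by the rest of the driver.
 */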
1222static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1223{
1224	uint8_t num_base_address, subrev, variant;
1225	struct binary_header *bhdr;
1226	struct ip_discovery_header *ihdr;
1227	struct die_header *dhdr;
1228	struct ip_v4 *ip;
1229	uint16_t die_offset;
1230	uint16_t ip_offset;
1231	uint16_t num_dies;
1232	uint16_t num_ips;
1233	int hw_ip;
1234	int i, j, k;
1235	int r;
1236
1237	r = amdgpu_discovery_init(adev);
1238	if (r) {
1239		DRM_ERROR("amdgpu_discovery_init failed\n");
1240		return r;
1241	}
1242
1243	adev->gfx.xcc_mask = 0;
1244	adev->sdma.sdma_mask = 0;
1245	adev->vcn.inst_mask = 0;
1246	adev->jpeg.inst_mask = 0;
1247	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1248	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1249			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1250	num_dies = le16_to_cpu(ihdr->num_dies);
1251
1252	DRM_DEBUG("number of dies: %d\n", num_dies);
1253
1254	for (i = 0; i < num_dies; i++) {
1255		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1256		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1257		num_ips = le16_to_cpu(dhdr->num_ips);
1258		ip_offset = die_offset + sizeof(*dhdr);
1259
1260		if (le16_to_cpu(dhdr->die_id) != i) {
1261			DRM_ERROR("invalid die id %d, expected %d\n",
1262					le16_to_cpu(dhdr->die_id), i);
1263			return -EINVAL;
1264		}
1265
1266		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1267				le16_to_cpu(dhdr->die_id), num_ips);
1268
1269		for (j = 0; j < num_ips; j++) {
1270			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1271
1272			if (amdgpu_discovery_validate_ip(ip))
1273				goto next_ip;
1274
1275			num_base_address = ip->num_base_address;
1276
1277			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1278				  hw_id_names[le16_to_cpu(ip->hw_id)],
1279				  le16_to_cpu(ip->hw_id),
1280				  ip->instance_number,
1281				  ip->major, ip->minor,
1282				  ip->revision);
1283
1284			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1285				/* Bit [5:0]: original revision value
1286				 * Bit [7:6]: en/decode capability:
1287				 *     0b00 : VCN function normally
1288				 *     0b10 : encode is disabled
1289				 *     0b01 : decode is disabled
1290				 */
1291				if (adev->vcn.num_vcn_inst <
1292				    AMDGPU_MAX_VCN_INSTANCES) {
1293					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1294						ip->revision & 0xc0;
1295					adev->vcn.num_vcn_inst++;
1296					adev->vcn.inst_mask |=
1297						(1U << ip->instance_number);
1298					adev->jpeg.inst_mask |=
1299						(1U << ip->instance_number);
1300				} else {
1301					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1302						adev->vcn.num_vcn_inst + 1,
1303						AMDGPU_MAX_VCN_INSTANCES);
1304				}
1305				ip->revision &= ~0xc0;
1306			}
1307			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1308			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1309			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1310			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1311				if (adev->sdma.num_instances <
1312				    AMDGPU_MAX_SDMA_INSTANCES) {
1313					adev->sdma.num_instances++;
1314					adev->sdma.sdma_mask |=
1315						(1U << ip->instance_number);
1316				} else {
1317					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1318						adev->sdma.num_instances + 1,
1319						AMDGPU_MAX_SDMA_INSTANCES);
1320				}
1321			}
1322
1323			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1324				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1325					adev->vpe.num_instances++;
1326				else
1327					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1328						adev->vpe.num_instances + 1,
1329						AMDGPU_MAX_VPE_INSTANCES);
1330			}
1331
1332			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1333				adev->gmc.num_umc++;
1334				adev->umc.node_inst_num++;
1335			}
1336
1337			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1338				adev->gfx.xcc_mask |=
1339					(1U << ip->instance_number);
1340
1341			for (k = 0; k < num_base_address; k++) {
1342				/*
1343				 * convert the endianness of base addresses in place,
1344				 * so that we don't need to convert them when accessing adev->reg_offset.
1345				 */
1346				if (ihdr->base_addr_64_bit)
1347					/* Truncate the 64bit base address from ip discovery
1348					 * and only store lower 32bit ip base in reg_offset[].
1349					 * Bits > 32 follows ASIC specific format, thus just
1350					 * discard them and handle it within specific ASIC.
1351					 * By this way reg_offset[] and related helpers can
1352					 * stay unchanged.
1353					 * The base address is in dwords, thus clear the
1354					 * highest 2 bits to store.
1355					 */
1356					ip->base_address[k] =
1357						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1358				else
1359					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1360				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1361			}
1362
1363			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1364				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1365				    hw_id_map[hw_ip] != 0) {
1366					DRM_DEBUG("set register base offset for %s\n",
1367							hw_id_names[le16_to_cpu(ip->hw_id)]);
1368					adev->reg_offset[hw_ip][ip->instance_number] =
1369						ip->base_address;
1370					/* Instance support is somewhat inconsistent.
1371					 * SDMA is a good example.  Sienna cichlid has 4 total
1372					 * SDMA instances, each enumerated separately (HWIDs
1373					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1374					 * but they are enumerated as multiple instances of the
1375					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1376					 * example.  On most chips there are multiple instances
1377					 * with the same HWID.
1378					 */
1379
1380					if (ihdr->version < 3) {
1381						subrev = 0;
1382						variant = 0;
1383					} else {
1384						subrev = ip->sub_revision;
1385						variant = ip->variant;
1386					}
1387
1388					adev->ip_versions[hw_ip]
1389							 [ip->instance_number] =
1390						IP_VERSION_FULL(ip->major,
1391								ip->minor,
1392								ip->revision,
1393								variant,
1394								subrev);
1395				}
1396			}
1397
1398next_ip:
1399			if (ihdr->base_addr_64_bit)
1400				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1401			else
1402				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1403		}
1404	}
1405
1406	return 0;
1407}
1408
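/*
 * Apply the gathered harvest information: mark fully harvested VCN/JPEG IPs
 * in adev->harvest_ip_mask and subtract harvested UMC instances from the UMC
 * count.
 */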
1409static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1410{
1411	int vcn_harvest_count = 0;
1412	int umc_harvest_count = 0;
1413
1414	/*
1415	 * Harvest table does not fit Navi1x and legacy GPUs,
1416	 * so read harvest bit per IP data structure to set
1417	 * harvest configuration.
1418	 */
1419	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1420	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
1421		if ((adev->pdev->device == 0x731E &&
1422			(adev->pdev->revision == 0xC6 ||
1423			 adev->pdev->revision == 0xC7)) ||
1424			(adev->pdev->device == 0x7340 &&
1425			 adev->pdev->revision == 0xC9) ||
1426			(adev->pdev->device == 0x7360 &&
1427			 adev->pdev->revision == 0xC7))
1428			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1429				&vcn_harvest_count);
1430	} else {
1431		amdgpu_discovery_read_from_harvest_table(adev,
1432							 &vcn_harvest_count,
1433							 &umc_harvest_count);
1434	}
1435
1436	amdgpu_discovery_harvest_config_quirk(adev);
1437
1438	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1439		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1440		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1441	}
1442
1443	if (umc_harvest_count < adev->gmc.num_umc) {
1444		adev->gmc.num_umc -= umc_harvest_count;
1445	}
1446}
1447
1448union gc_info {
1449	struct gc_info_v1_0 v1;
1450	struct gc_info_v1_1 v1_1;
1451	struct gc_info_v1_2 v1_2;
1452	struct gc_info_v2_0 v2;
1453	struct gc_info_v2_1 v2_1;
1454};
1455
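/*
 * Fill adev->gfx.config and adev->gfx.cu_info from the GC info table.  The
 * v1.x layout reports WGP-based counts, the v2.x layout CU-based counts.
 */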
1456static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1457{
1458	struct binary_header *bhdr;
1459	union gc_info *gc_info;
1460	u16 offset;
1461
1462	if (!adev->mman.discovery_bin) {
1463		DRM_ERROR("ip discovery uninitialized\n");
1464		return -EINVAL;
1465	}
1466
1467	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1468	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1469
1470	if (!offset)
1471		return 0;
1472
1473	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1474
1475	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1476	case 1:
1477		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1478		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1479						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1480		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1481		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1482		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1483		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1484		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1485		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1486		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1487		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1488		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1489		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1490		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1491		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1492		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1493			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1494		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1495		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1496			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1497			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1498			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1499		}
1500		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1501			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1502			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1503			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1504			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1505			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1506			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1507			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1508			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1509		}
1510		break;
1511	case 2:
1512		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1513		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1514		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1515		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1516		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1517		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1518		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1519		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1520		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1521		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1522		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1523		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1524		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1525		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1526		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1527			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1528		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1529		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1530			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1531			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1532			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1533			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1534			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1535			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1536			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1537		}
1538		break;
1539	default:
1540		dev_err(adev->dev,
1541			"Unhandled GC info table %d.%d\n",
1542			le16_to_cpu(gc_info->v1.header.version_major),
1543			le16_to_cpu(gc_info->v1.header.version_minor));
1544		return -EINVAL;
1545	}
1546	return 0;
1547}
1548
1549union mall_info {
1550	struct mall_info_v1_0 v1;
1551	struct mall_info_v2_0 v2;
1552};
1553
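/*
 * Compute the total MALL size.  v1 sums a per-UMC size, doubled or halved
 * per the m_s_present/m_half_use masks; v2 simply multiplies the per-UMC
 * size by the number of UMC instances.
 */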
1554static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1555{
1556	struct binary_header *bhdr;
1557	union mall_info *mall_info;
1558	u32 u, mall_size_per_umc, m_s_present, half_use;
1559	u64 mall_size;
1560	u16 offset;
1561
1562	if (!adev->mman.discovery_bin) {
1563		DRM_ERROR("ip discovery uninitialized\n");
1564		return -EINVAL;
1565	}
1566
1567	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1568	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1569
1570	if (!offset)
1571		return 0;
1572
1573	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1574
1575	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1576	case 1:
1577		mall_size = 0;
1578		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1579		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1580		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1581		for (u = 0; u < adev->gmc.num_umc; u++) {
1582			if (m_s_present & (1 << u))
1583				mall_size += mall_size_per_umc * 2;
1584			else if (half_use & (1 << u))
1585				mall_size += mall_size_per_umc / 2;
1586			else
1587				mall_size += mall_size_per_umc;
1588		}
1589		adev->gmc.mall_size = mall_size;
1590		adev->gmc.m_half_use = half_use;
1591		break;
1592	case 2:
1593		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1594		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1595		break;
1596	default:
1597		dev_err(adev->dev,
1598			"Unhandled MALL info table %d.%d\n",
1599			le16_to_cpu(mall_info->v1.header.version_major),
1600			le16_to_cpu(mall_info->v1.header.version_minor));
1601		return -EINVAL;
1602	}
1603	return 0;
1604}
1605
1606union vcn_info {
1607	struct vcn_info_v1_0 v1;
1608};
1609
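/*
 * Read the per-instance VCN fuse data, which tells the driver which codec
 * capabilities are disabled on each VCN instance.
 */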
1610static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1611{
1612	struct binary_header *bhdr;
1613	union vcn_info *vcn_info;
1614	u16 offset;
1615	int v;
1616
1617	if (!adev->mman.discovery_bin) {
1618		DRM_ERROR("ip discovery uninitialized\n");
1619		return -EINVAL;
1620	}
1621
1622	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1623	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1624	 * but that may change in the future with new GPUs so keep this
1625	 * check for defensive purposes.
1626	 */
1627	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1628		dev_err(adev->dev, "invalid vcn instances\n");
1629		return -EINVAL;
1630	}
1631
1632	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1633	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1634
1635	if (!offset)
1636		return 0;
1637
1638	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1639
1640	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1641	case 1:
1642		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1643		 * so this won't overflow.
1644		 */
1645		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1646			adev->vcn.vcn_codec_disable_mask[v] =
1647				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1648		}
1649		break;
1650	default:
1651		dev_err(adev->dev,
1652			"Unhandled VCN info table %d.%d\n",
1653			le16_to_cpu(vcn_info->v1.header.version_major),
1654			le16_to_cpu(vcn_info->v1.header.version_minor));
1655		return -EINVAL;
1656	}
1657	return 0;
1658}
1659
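/*
 * The amdgpu_discovery_set_*_ip_blocks() helpers below translate the
 * detected IP versions into the list of IP blocks that the
 * amdgpu_device_ip_* code will initialize.
 */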
1660static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1661{
1662	/* what IP to use for this? */
1663	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1664	case IP_VERSION(9, 0, 1):
1665	case IP_VERSION(9, 1, 0):
1666	case IP_VERSION(9, 2, 1):
1667	case IP_VERSION(9, 2, 2):
1668	case IP_VERSION(9, 3, 0):
1669	case IP_VERSION(9, 4, 0):
1670	case IP_VERSION(9, 4, 1):
1671	case IP_VERSION(9, 4, 2):
1672	case IP_VERSION(9, 4, 3):
1673		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1674		break;
1675	case IP_VERSION(10, 1, 10):
1676	case IP_VERSION(10, 1, 1):
1677	case IP_VERSION(10, 1, 2):
1678	case IP_VERSION(10, 1, 3):
1679	case IP_VERSION(10, 1, 4):
1680	case IP_VERSION(10, 3, 0):
1681	case IP_VERSION(10, 3, 1):
1682	case IP_VERSION(10, 3, 2):
1683	case IP_VERSION(10, 3, 3):
1684	case IP_VERSION(10, 3, 4):
1685	case IP_VERSION(10, 3, 5):
1686	case IP_VERSION(10, 3, 6):
1687	case IP_VERSION(10, 3, 7):
1688		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1689		break;
1690	case IP_VERSION(11, 0, 0):
1691	case IP_VERSION(11, 0, 1):
1692	case IP_VERSION(11, 0, 2):
1693	case IP_VERSION(11, 0, 3):
1694	case IP_VERSION(11, 0, 4):
1695	case IP_VERSION(11, 5, 0):
1696	case IP_VERSION(11, 5, 1):
1697		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1698		break;
1699	default:
1700		dev_err(adev->dev,
1701			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1702			amdgpu_ip_version(adev, GC_HWIP, 0));
1703		return -EINVAL;
1704	}
1705	return 0;
1706}
1707
1708static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1709{
1710	/* use GC or MMHUB IP version */
1711	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1712	case IP_VERSION(9, 0, 1):
1713	case IP_VERSION(9, 1, 0):
1714	case IP_VERSION(9, 2, 1):
1715	case IP_VERSION(9, 2, 2):
1716	case IP_VERSION(9, 3, 0):
1717	case IP_VERSION(9, 4, 0):
1718	case IP_VERSION(9, 4, 1):
1719	case IP_VERSION(9, 4, 2):
1720	case IP_VERSION(9, 4, 3):
1721		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1722		break;
1723	case IP_VERSION(10, 1, 10):
1724	case IP_VERSION(10, 1, 1):
1725	case IP_VERSION(10, 1, 2):
1726	case IP_VERSION(10, 1, 3):
1727	case IP_VERSION(10, 1, 4):
1728	case IP_VERSION(10, 3, 0):
1729	case IP_VERSION(10, 3, 1):
1730	case IP_VERSION(10, 3, 2):
1731	case IP_VERSION(10, 3, 3):
1732	case IP_VERSION(10, 3, 4):
1733	case IP_VERSION(10, 3, 5):
1734	case IP_VERSION(10, 3, 6):
1735	case IP_VERSION(10, 3, 7):
1736		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1737		break;
1738	case IP_VERSION(11, 0, 0):
1739	case IP_VERSION(11, 0, 1):
1740	case IP_VERSION(11, 0, 2):
1741	case IP_VERSION(11, 0, 3):
1742	case IP_VERSION(11, 0, 4):
1743	case IP_VERSION(11, 5, 0):
1744	case IP_VERSION(11, 5, 1):
1745		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1746		break;
1747	default:
1748		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1749			amdgpu_ip_version(adev, GC_HWIP, 0));
1750		return -EINVAL;
1751	}
1752	return 0;
1753}
1754
1755static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1756{
1757	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1758	case IP_VERSION(4, 0, 0):
1759	case IP_VERSION(4, 0, 1):
1760	case IP_VERSION(4, 1, 0):
1761	case IP_VERSION(4, 1, 1):
1762	case IP_VERSION(4, 3, 0):
1763		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1764		break;
1765	case IP_VERSION(4, 2, 0):
1766	case IP_VERSION(4, 2, 1):
1767	case IP_VERSION(4, 4, 0):
1768	case IP_VERSION(4, 4, 2):
1769		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1770		break;
1771	case IP_VERSION(5, 0, 0):
1772	case IP_VERSION(5, 0, 1):
1773	case IP_VERSION(5, 0, 2):
1774	case IP_VERSION(5, 0, 3):
1775	case IP_VERSION(5, 2, 0):
1776	case IP_VERSION(5, 2, 1):
1777		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1778		break;
1779	case IP_VERSION(6, 0, 0):
1780	case IP_VERSION(6, 0, 1):
1781	case IP_VERSION(6, 0, 2):
1782		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1783		break;
1784	case IP_VERSION(6, 1, 0):
1785		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1786		break;
1787	case IP_VERSION(7, 0, 0):
1788		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1789		break;
1790	default:
1791		dev_err(adev->dev,
1792			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1793			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1794		return -EINVAL;
1795	}
1796	return 0;
1797}
1798
1799static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1800{
1801	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1802	case IP_VERSION(9, 0, 0):
1803		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1804		break;
1805	case IP_VERSION(10, 0, 0):
1806	case IP_VERSION(10, 0, 1):
1807		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1808		break;
1809	case IP_VERSION(11, 0, 0):
1810	case IP_VERSION(11, 0, 2):
1811	case IP_VERSION(11, 0, 4):
1812	case IP_VERSION(11, 0, 5):
1813	case IP_VERSION(11, 0, 9):
1814	case IP_VERSION(11, 0, 7):
1815	case IP_VERSION(11, 0, 11):
1816	case IP_VERSION(11, 0, 12):
1817	case IP_VERSION(11, 0, 13):
1818	case IP_VERSION(11, 5, 0):
1819		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1820		break;
1821	case IP_VERSION(11, 0, 8):
1822		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1823		break;
1824	case IP_VERSION(11, 0, 3):
1825	case IP_VERSION(12, 0, 1):
1826		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1827		break;
1828	case IP_VERSION(13, 0, 0):
1829	case IP_VERSION(13, 0, 1):
1830	case IP_VERSION(13, 0, 2):
1831	case IP_VERSION(13, 0, 3):
1832	case IP_VERSION(13, 0, 5):
1833	case IP_VERSION(13, 0, 6):
1834	case IP_VERSION(13, 0, 7):
1835	case IP_VERSION(13, 0, 8):
1836	case IP_VERSION(13, 0, 10):
1837	case IP_VERSION(13, 0, 11):
1838	case IP_VERSION(14, 0, 0):
1839	case IP_VERSION(14, 0, 1):
1840		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1841		break;
1842	case IP_VERSION(13, 0, 4):
1843		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1844		break;
1845	case IP_VERSION(14, 0, 2):
1846	case IP_VERSION(14, 0, 3):
1847		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
1848		break;
1849	default:
1850		dev_err(adev->dev,
1851			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1852			amdgpu_ip_version(adev, MP0_HWIP, 0));
1853		return -EINVAL;
1854	}
1855	return 0;
1856}
1857
1858static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1859{
1860	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1861	case IP_VERSION(9, 0, 0):
1862	case IP_VERSION(10, 0, 0):
1863	case IP_VERSION(10, 0, 1):
1864	case IP_VERSION(11, 0, 2):
1865		if (adev->asic_type == CHIP_ARCTURUS)
1866			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1867		else
1868			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1869		break;
1870	case IP_VERSION(11, 0, 0):
1871	case IP_VERSION(11, 0, 5):
1872	case IP_VERSION(11, 0, 9):
1873	case IP_VERSION(11, 0, 7):
1874	case IP_VERSION(11, 0, 8):
1875	case IP_VERSION(11, 0, 11):
1876	case IP_VERSION(11, 0, 12):
1877	case IP_VERSION(11, 0, 13):
1878	case IP_VERSION(11, 5, 0):
1879		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1880		break;
1881	case IP_VERSION(12, 0, 0):
1882	case IP_VERSION(12, 0, 1):
1883		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1884		break;
1885	case IP_VERSION(13, 0, 0):
1886	case IP_VERSION(13, 0, 1):
1887	case IP_VERSION(13, 0, 2):
1888	case IP_VERSION(13, 0, 3):
1889	case IP_VERSION(13, 0, 4):
1890	case IP_VERSION(13, 0, 5):
1891	case IP_VERSION(13, 0, 6):
1892	case IP_VERSION(13, 0, 7):
1893	case IP_VERSION(13, 0, 8):
1894	case IP_VERSION(13, 0, 10):
1895	case IP_VERSION(13, 0, 11):
1896		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1897		break;
1898	case IP_VERSION(14, 0, 0):
1899	case IP_VERSION(14, 0, 1):
1900		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
1901		break;
1902	default:
1903		dev_err(adev->dev,
1904			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
1905			amdgpu_ip_version(adev, MP1_HWIP, 0));
1906		return -EINVAL;
1907	}
1908	return 0;
1909}
1910
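/* Display selection: virtual display and SR-IOV setups fall back to the
 * software VKMS block, otherwise the DC-based DM block is registered for
 * the detected DCE/DCI IP version (DC paths are compiled in only with
 * CONFIG_DRM_AMD_DC).
 */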
1911#if defined(CONFIG_DRM_AMD_DC)
1912static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
1913{
1914	amdgpu_device_set_sriov_virtual_display(adev);
1915	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1916}
1917#endif
1918
1919static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
1920{
1921	if (adev->enable_virtual_display) {
1922		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1923		return 0;
1924	}
1925
1926	if (!amdgpu_device_has_dc_support(adev))
1927		return 0;
1928
1929#if defined(CONFIG_DRM_AMD_DC)
1930	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1931		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1932		case IP_VERSION(1, 0, 0):
1933		case IP_VERSION(1, 0, 1):
1934		case IP_VERSION(2, 0, 2):
1935		case IP_VERSION(2, 0, 0):
1936		case IP_VERSION(2, 0, 3):
1937		case IP_VERSION(2, 1, 0):
1938		case IP_VERSION(3, 0, 0):
1939		case IP_VERSION(3, 0, 2):
1940		case IP_VERSION(3, 0, 3):
1941		case IP_VERSION(3, 0, 1):
1942		case IP_VERSION(3, 1, 2):
1943		case IP_VERSION(3, 1, 3):
1944		case IP_VERSION(3, 1, 4):
1945		case IP_VERSION(3, 1, 5):
1946		case IP_VERSION(3, 1, 6):
1947		case IP_VERSION(3, 2, 0):
1948		case IP_VERSION(3, 2, 1):
1949		case IP_VERSION(3, 5, 0):
1950		case IP_VERSION(3, 5, 1):
1951			if (amdgpu_sriov_vf(adev))
1952				amdgpu_discovery_set_sriov_display(adev);
1953			else
1954				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1955			break;
1956		default:
1957			dev_err(adev->dev,
1958				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
1959				amdgpu_ip_version(adev, DCE_HWIP, 0));
1960			return -EINVAL;
1961		}
1962	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1963		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
1964		case IP_VERSION(12, 0, 0):
1965		case IP_VERSION(12, 0, 1):
1966		case IP_VERSION(12, 1, 0):
1967			if (amdgpu_sriov_vf(adev))
1968				amdgpu_discovery_set_sriov_display(adev);
1969			else
1970				amdgpu_device_ip_block_add(adev, &dm_ip_block);
1971			break;
1972		default:
1973			dev_err(adev->dev,
1974				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
1975				amdgpu_ip_version(adev, DCI_HWIP, 0));
1976			return -EINVAL;
1977		}
1978	}
1979#endif
1980	return 0;
1981}
1982
1983static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1984{
1985	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1986	case IP_VERSION(9, 0, 1):
1987	case IP_VERSION(9, 1, 0):
1988	case IP_VERSION(9, 2, 1):
1989	case IP_VERSION(9, 2, 2):
1990	case IP_VERSION(9, 3, 0):
1991	case IP_VERSION(9, 4, 0):
1992	case IP_VERSION(9, 4, 1):
1993	case IP_VERSION(9, 4, 2):
1994		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1995		break;
1996	case IP_VERSION(9, 4, 3):
1997		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
1998		break;
1999	case IP_VERSION(10, 1, 10):
2000	case IP_VERSION(10, 1, 2):
2001	case IP_VERSION(10, 1, 1):
2002	case IP_VERSION(10, 1, 3):
2003	case IP_VERSION(10, 1, 4):
2004	case IP_VERSION(10, 3, 0):
2005	case IP_VERSION(10, 3, 2):
2006	case IP_VERSION(10, 3, 1):
2007	case IP_VERSION(10, 3, 4):
2008	case IP_VERSION(10, 3, 5):
2009	case IP_VERSION(10, 3, 6):
2010	case IP_VERSION(10, 3, 3):
2011	case IP_VERSION(10, 3, 7):
2012		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2013		break;
2014	case IP_VERSION(11, 0, 0):
2015	case IP_VERSION(11, 0, 1):
2016	case IP_VERSION(11, 0, 2):
2017	case IP_VERSION(11, 0, 3):
2018	case IP_VERSION(11, 0, 4):
2019	case IP_VERSION(11, 5, 0):
2020	case IP_VERSION(11, 5, 1):
2021		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2022		break;
2023	default:
2024		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2025			amdgpu_ip_version(adev, GC_HWIP, 0));
2026		return -EINVAL;
2027	}
2028	return 0;
2029}
2030
2031static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2032{
2033	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2034	case IP_VERSION(4, 0, 0):
2035	case IP_VERSION(4, 0, 1):
2036	case IP_VERSION(4, 1, 0):
2037	case IP_VERSION(4, 1, 1):
2038	case IP_VERSION(4, 1, 2):
2039	case IP_VERSION(4, 2, 0):
2040	case IP_VERSION(4, 2, 2):
2041	case IP_VERSION(4, 4, 0):
2042		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2043		break;
2044	case IP_VERSION(4, 4, 2):
2045		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2046		break;
2047	case IP_VERSION(5, 0, 0):
2048	case IP_VERSION(5, 0, 1):
2049	case IP_VERSION(5, 0, 2):
2050	case IP_VERSION(5, 0, 5):
2051		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2052		break;
2053	case IP_VERSION(5, 2, 0):
2054	case IP_VERSION(5, 2, 2):
2055	case IP_VERSION(5, 2, 4):
2056	case IP_VERSION(5, 2, 5):
2057	case IP_VERSION(5, 2, 6):
2058	case IP_VERSION(5, 2, 3):
2059	case IP_VERSION(5, 2, 1):
2060	case IP_VERSION(5, 2, 7):
2061		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2062		break;
2063	case IP_VERSION(6, 0, 0):
2064	case IP_VERSION(6, 0, 1):
2065	case IP_VERSION(6, 0, 2):
2066	case IP_VERSION(6, 0, 3):
2067	case IP_VERSION(6, 1, 0):
2068	case IP_VERSION(6, 1, 1):
2069		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2070		break;
2071	default:
2072		dev_err(adev->dev,
2073			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2074			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2075		return -EINVAL;
2076	}
2077	return 0;
2078}
2079
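/* Multimedia selection: ASICs that still expose a VCE IP use the legacy
 * UVD/VCE encode/decode blocks; all newer ASICs use the unified VCN (and,
 * where present, JPEG) blocks chosen by UVD/VCN IP version.
 */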
2080static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2081{
2082	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2083		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2084		case IP_VERSION(7, 0, 0):
2085		case IP_VERSION(7, 2, 0):
2086			/* UVD is not supported on vega20 SR-IOV */
2087			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2088				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2089			break;
2090		default:
2091			dev_err(adev->dev,
2092				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2093				amdgpu_ip_version(adev, UVD_HWIP, 0));
2094			return -EINVAL;
2095		}
2096		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2097		case IP_VERSION(4, 0, 0):
2098		case IP_VERSION(4, 1, 0):
2099			/* VCE is not supported on vega20 SR-IOV */
2100			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2101				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2102			break;
2103		default:
2104			dev_err(adev->dev,
2105				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2106				amdgpu_ip_version(adev, VCE_HWIP, 0));
2107			return -EINVAL;
2108		}
2109	} else {
2110		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2111		case IP_VERSION(1, 0, 0):
2112		case IP_VERSION(1, 0, 1):
2113			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2114			break;
2115		case IP_VERSION(2, 0, 0):
2116		case IP_VERSION(2, 0, 2):
2117		case IP_VERSION(2, 2, 0):
2118			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2119			if (!amdgpu_sriov_vf(adev))
2120				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2121			break;
2122		case IP_VERSION(2, 0, 3):
2123			break;
2124		case IP_VERSION(2, 5, 0):
2125			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2126			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2127			break;
2128		case IP_VERSION(2, 6, 0):
2129			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2130			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2131			break;
2132		case IP_VERSION(3, 0, 0):
2133		case IP_VERSION(3, 0, 16):
2134		case IP_VERSION(3, 1, 1):
2135		case IP_VERSION(3, 1, 2):
2136		case IP_VERSION(3, 0, 2):
2137			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2138			if (!amdgpu_sriov_vf(adev))
2139				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2140			break;
2141		case IP_VERSION(3, 0, 33):
2142			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2143			break;
2144		case IP_VERSION(4, 0, 0):
2145		case IP_VERSION(4, 0, 2):
2146		case IP_VERSION(4, 0, 4):
2147			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2148			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2149			break;
2150		case IP_VERSION(4, 0, 3):
2151			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2152			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2153			break;
2154		case IP_VERSION(4, 0, 5):
2155		case IP_VERSION(4, 0, 6):
2156			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2157			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2158			break;
2159		case IP_VERSION(5, 0, 0):
2160			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2161			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2162			break;
2163		default:
2164			dev_err(adev->dev,
2165				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2166				amdgpu_ip_version(adev, UVD_HWIP, 0));
2167			return -EINVAL;
2168		}
2169	}
2170	return 0;
2171}
2172
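/* MES (Micro Engine Scheduler): optional on GC 10.x, gated by the
 * amdgpu_mes module parameter; always enabled on GC 11.x, where the KIQ
 * is handled through MES as well.
 */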
2173static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2174{
2175	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2176	case IP_VERSION(10, 1, 10):
2177	case IP_VERSION(10, 1, 1):
2178	case IP_VERSION(10, 1, 2):
2179	case IP_VERSION(10, 1, 3):
2180	case IP_VERSION(10, 1, 4):
2181	case IP_VERSION(10, 3, 0):
2182	case IP_VERSION(10, 3, 1):
2183	case IP_VERSION(10, 3, 2):
2184	case IP_VERSION(10, 3, 3):
2185	case IP_VERSION(10, 3, 4):
2186	case IP_VERSION(10, 3, 5):
2187	case IP_VERSION(10, 3, 6):
2188		if (amdgpu_mes) {
2189			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
2190			adev->enable_mes = true;
2191			if (amdgpu_mes_kiq)
2192				adev->enable_mes_kiq = true;
2193		}
2194		break;
2195	case IP_VERSION(11, 0, 0):
2196	case IP_VERSION(11, 0, 1):
2197	case IP_VERSION(11, 0, 2):
2198	case IP_VERSION(11, 0, 3):
2199	case IP_VERSION(11, 0, 4):
2200	case IP_VERSION(11, 5, 0):
2201	case IP_VERSION(11, 5, 1):
2202		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2203		adev->enable_mes = true;
2204		adev->enable_mes_kiq = true;
2205		break;
2206	default:
2207		break;
2208	}
2209	return 0;
2210}
2211
2212static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2213{
2214	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2215	case IP_VERSION(9, 4, 3):
2216		aqua_vanjaram_init_soc_config(adev);
2217		break;
2218	default:
2219		break;
2220	}
2221}
2222
2223static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2224{
2225	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2226	case IP_VERSION(6, 1, 0):
2227	case IP_VERSION(6, 1, 1):
2228		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2229		break;
2230	default:
2231		break;
2232	}
2233
2234	return 0;
2235}
2236
2237static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2238{
2239	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2240	case IP_VERSION(4, 0, 5):
2241	case IP_VERSION(4, 0, 6):
2242		if (amdgpu_umsch_mm & 0x1) {
2243			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2244			adev->enable_umsch_mm = true;
2245		}
2246		break;
2247	default:
2248		break;
2249	}
2250
2251	return 0;
2252}
2253
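/* Top-level entry point: ASICs that predate IP discovery (Vega10 through
 * Aldebaran) have their IP versions hardcoded below, all others parse them
 * from the discovery binary.  The routine then derives the ASIC family,
 * installs the NBIO/HDP/DF/SMUIO/LSDMA callbacks and registers the
 * individual IP blocks in initialization order.
 */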
2254int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2255{
2256	int r;
2257
2258	switch (adev->asic_type) {
2259	case CHIP_VEGA10:
2260		vega10_reg_base_init(adev);
2261		adev->sdma.num_instances = 2;
2262		adev->gmc.num_umc = 4;
2263		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2264		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2265		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2266		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2267		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2268		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2269		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2270		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2271		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2272		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2273		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2274		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2275		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2276		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2277		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2278		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2279		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2280		break;
2281	case CHIP_VEGA12:
2282		vega10_reg_base_init(adev);
2283		adev->sdma.num_instances = 2;
2284		adev->gmc.num_umc = 4;
2285		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2286		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2287		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2288		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2289		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2290		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2291		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2292		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2293		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2294		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2295		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2296		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2297		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2298		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2299		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2300		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2301		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2302		break;
2303	case CHIP_RAVEN:
2304		vega10_reg_base_init(adev);
2305		adev->sdma.num_instances = 1;
2306		adev->vcn.num_vcn_inst = 1;
2307		adev->gmc.num_umc = 2;
2308		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2309			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2310			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2311			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2312			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2313			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2314			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2315			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2316			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2317			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2318			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2319			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2320			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2321			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2322			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2323			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2324		} else {
2325			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2326			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2327			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2328			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2329			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2330			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2331			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2332			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2333			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2334			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2335			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2336			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2337			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2338			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2339			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2340		}
2341		break;
2342	case CHIP_VEGA20:
2343		vega20_reg_base_init(adev);
2344		adev->sdma.num_instances = 2;
2345		adev->gmc.num_umc = 8;
2346		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2347		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2348		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2349		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2350		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2351		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2352		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2353		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2354		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2355		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2356		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2357		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2358		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2359		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2360		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2361		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2362		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2363		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2364		break;
2365	case CHIP_ARCTURUS:
2366		arct_reg_base_init(adev);
2367		adev->sdma.num_instances = 8;
2368		adev->vcn.num_vcn_inst = 2;
2369		adev->gmc.num_umc = 8;
2370		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2371		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2372		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2373		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2374		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2375		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2376		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2377		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2378		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2379		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2380		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2381		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2382		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2383		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2384		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2385		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2386		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2387		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2388		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2389		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2390		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2391		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2392		break;
2393	case CHIP_ALDEBARAN:
2394		aldebaran_reg_base_init(adev);
2395		adev->sdma.num_instances = 5;
2396		adev->vcn.num_vcn_inst = 2;
2397		adev->gmc.num_umc = 4;
2398		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2399		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2400		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2401		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2402		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2403		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2404		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2405		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2406		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2407		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2408		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2409		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2410		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2411		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2412		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2413		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2414		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2415		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2416		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2417		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2418		break;
2419	default:
2420		r = amdgpu_discovery_reg_base_init(adev);
2421		if (r)
2422			return -EINVAL;
2423
2424		amdgpu_discovery_harvest_ip(adev);
2425		amdgpu_discovery_get_gfx_info(adev);
2426		amdgpu_discovery_get_mall_info(adev);
2427		amdgpu_discovery_get_vcn_info(adev);
2428		break;
2429	}
2430
2431	amdgpu_discovery_init_soc_config(adev);
2432	amdgpu_discovery_sysfs_init(adev);
2433
2434	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2435	case IP_VERSION(9, 0, 1):
2436	case IP_VERSION(9, 2, 1):
2437	case IP_VERSION(9, 4, 0):
2438	case IP_VERSION(9, 4, 1):
2439	case IP_VERSION(9, 4, 2):
2440	case IP_VERSION(9, 4, 3):
2441		adev->family = AMDGPU_FAMILY_AI;
2442		break;
2443	case IP_VERSION(9, 1, 0):
2444	case IP_VERSION(9, 2, 2):
2445	case IP_VERSION(9, 3, 0):
2446		adev->family = AMDGPU_FAMILY_RV;
2447		break;
2448	case IP_VERSION(10, 1, 10):
2449	case IP_VERSION(10, 1, 1):
2450	case IP_VERSION(10, 1, 2):
2451	case IP_VERSION(10, 1, 3):
2452	case IP_VERSION(10, 1, 4):
2453	case IP_VERSION(10, 3, 0):
2454	case IP_VERSION(10, 3, 2):
2455	case IP_VERSION(10, 3, 4):
2456	case IP_VERSION(10, 3, 5):
2457		adev->family = AMDGPU_FAMILY_NV;
2458		break;
2459	case IP_VERSION(10, 3, 1):
2460		adev->family = AMDGPU_FAMILY_VGH;
2461		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2462		break;
2463	case IP_VERSION(10, 3, 3):
2464		adev->family = AMDGPU_FAMILY_YC;
2465		break;
2466	case IP_VERSION(10, 3, 6):
2467		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2468		break;
2469	case IP_VERSION(10, 3, 7):
2470		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2471		break;
2472	case IP_VERSION(11, 0, 0):
2473	case IP_VERSION(11, 0, 2):
2474	case IP_VERSION(11, 0, 3):
2475		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2476		break;
2477	case IP_VERSION(11, 0, 1):
2478	case IP_VERSION(11, 0, 4):
2479		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2480		break;
2481	case IP_VERSION(11, 5, 0):
2482	case IP_VERSION(11, 5, 1):
2483		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2484		break;
2485	default:
2486		return -EINVAL;
2487	}
2488
2489	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2490	case IP_VERSION(9, 1, 0):
2491	case IP_VERSION(9, 2, 2):
2492	case IP_VERSION(9, 3, 0):
2493	case IP_VERSION(10, 1, 3):
2494	case IP_VERSION(10, 1, 4):
2495	case IP_VERSION(10, 3, 1):
2496	case IP_VERSION(10, 3, 3):
2497	case IP_VERSION(10, 3, 6):
2498	case IP_VERSION(10, 3, 7):
2499	case IP_VERSION(11, 0, 1):
2500	case IP_VERSION(11, 0, 4):
2501	case IP_VERSION(11, 5, 0):
2502	case IP_VERSION(11, 5, 1):
2503		adev->flags |= AMD_IS_APU;
2504		break;
2505	default:
2506		break;
2507	}
2508
2509	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2510		adev->gmc.xgmi.supported = true;
2511
2512	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
2513		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2514
2515	/* set NBIO callbacks based on the NBIO IP version */
2516	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2517	case IP_VERSION(6, 1, 0):
2518	case IP_VERSION(6, 2, 0):
2519		adev->nbio.funcs = &nbio_v6_1_funcs;
2520		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2521		break;
2522	case IP_VERSION(7, 0, 0):
2523	case IP_VERSION(7, 0, 1):
2524	case IP_VERSION(2, 5, 0):
2525		adev->nbio.funcs = &nbio_v7_0_funcs;
2526		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2527		break;
2528	case IP_VERSION(7, 4, 0):
2529	case IP_VERSION(7, 4, 1):
2530	case IP_VERSION(7, 4, 4):
2531		adev->nbio.funcs = &nbio_v7_4_funcs;
2532		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2533		break;
2534	case IP_VERSION(7, 9, 0):
2535		adev->nbio.funcs = &nbio_v7_9_funcs;
2536		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2537		break;
2538	case IP_VERSION(7, 11, 0):
2539	case IP_VERSION(7, 11, 1):
2540		adev->nbio.funcs = &nbio_v7_11_funcs;
2541		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2542		break;
2543	case IP_VERSION(7, 2, 0):
2544	case IP_VERSION(7, 2, 1):
2545	case IP_VERSION(7, 3, 0):
2546	case IP_VERSION(7, 5, 0):
2547	case IP_VERSION(7, 5, 1):
2548		adev->nbio.funcs = &nbio_v7_2_funcs;
2549		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2550		break;
2551	case IP_VERSION(2, 1, 1):
2552	case IP_VERSION(2, 3, 0):
2553	case IP_VERSION(2, 3, 1):
2554	case IP_VERSION(2, 3, 2):
2555	case IP_VERSION(3, 3, 0):
2556	case IP_VERSION(3, 3, 1):
2557	case IP_VERSION(3, 3, 2):
2558	case IP_VERSION(3, 3, 3):
2559		adev->nbio.funcs = &nbio_v2_3_funcs;
2560		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2561		break;
2562	case IP_VERSION(4, 3, 0):
2563	case IP_VERSION(4, 3, 1):
2564		if (amdgpu_sriov_vf(adev))
2565			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2566		else
2567			adev->nbio.funcs = &nbio_v4_3_funcs;
2568		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2569		break;
2570	case IP_VERSION(7, 7, 0):
2571	case IP_VERSION(7, 7, 1):
2572		adev->nbio.funcs = &nbio_v7_7_funcs;
2573		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2574		break;
2575	case IP_VERSION(6, 3, 1):
2576		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2577		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2578		break;
2579	default:
2580		break;
2581	}
2582
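	/* set HDP callbacks based on the HDP IP version */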
2583	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2584	case IP_VERSION(4, 0, 0):
2585	case IP_VERSION(4, 0, 1):
2586	case IP_VERSION(4, 1, 0):
2587	case IP_VERSION(4, 1, 1):
2588	case IP_VERSION(4, 1, 2):
2589	case IP_VERSION(4, 2, 0):
2590	case IP_VERSION(4, 2, 1):
2591	case IP_VERSION(4, 4, 0):
2592	case IP_VERSION(4, 4, 2):
2593		adev->hdp.funcs = &hdp_v4_0_funcs;
2594		break;
2595	case IP_VERSION(5, 0, 0):
2596	case IP_VERSION(5, 0, 1):
2597	case IP_VERSION(5, 0, 2):
2598	case IP_VERSION(5, 0, 3):
2599	case IP_VERSION(5, 0, 4):
2600	case IP_VERSION(5, 2, 0):
2601		adev->hdp.funcs = &hdp_v5_0_funcs;
2602		break;
2603	case IP_VERSION(5, 2, 1):
2604		adev->hdp.funcs = &hdp_v5_2_funcs;
2605		break;
2606	case IP_VERSION(6, 0, 0):
2607	case IP_VERSION(6, 0, 1):
2608	case IP_VERSION(6, 1, 0):
2609		adev->hdp.funcs = &hdp_v6_0_funcs;
2610		break;
2611	case IP_VERSION(7, 0, 0):
2612		adev->hdp.funcs = &hdp_v7_0_funcs;
2613		break;
2614	default:
2615		break;
2616	}
2617
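	/* set DF (data fabric) callbacks based on the DF IP version */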
2618	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2619	case IP_VERSION(3, 6, 0):
2620	case IP_VERSION(3, 6, 1):
2621	case IP_VERSION(3, 6, 2):
2622		adev->df.funcs = &df_v3_6_funcs;
2623		break;
2624	case IP_VERSION(2, 1, 0):
2625	case IP_VERSION(2, 1, 1):
2626	case IP_VERSION(2, 5, 0):
2627	case IP_VERSION(3, 5, 1):
2628	case IP_VERSION(3, 5, 2):
2629		adev->df.funcs = &df_v1_7_funcs;
2630		break;
2631	case IP_VERSION(4, 3, 0):
2632		adev->df.funcs = &df_v4_3_funcs;
2633		break;
2634	case IP_VERSION(4, 6, 2):
2635		adev->df.funcs = &df_v4_6_2_funcs;
2636		break;
2637	default:
2638		break;
2639	}
2640
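	/* set SMUIO callbacks based on the SMUIO IP version */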
2641	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2642	case IP_VERSION(9, 0, 0):
2643	case IP_VERSION(9, 0, 1):
2644	case IP_VERSION(10, 0, 0):
2645	case IP_VERSION(10, 0, 1):
2646	case IP_VERSION(10, 0, 2):
2647		adev->smuio.funcs = &smuio_v9_0_funcs;
2648		break;
2649	case IP_VERSION(11, 0, 0):
2650	case IP_VERSION(11, 0, 2):
2651	case IP_VERSION(11, 0, 3):
2652	case IP_VERSION(11, 0, 4):
2653	case IP_VERSION(11, 0, 7):
2654	case IP_VERSION(11, 0, 8):
2655		adev->smuio.funcs = &smuio_v11_0_funcs;
2656		break;
2657	case IP_VERSION(11, 0, 6):
2658	case IP_VERSION(11, 0, 10):
2659	case IP_VERSION(11, 0, 11):
2660	case IP_VERSION(11, 5, 0):
2661	case IP_VERSION(13, 0, 1):
2662	case IP_VERSION(13, 0, 9):
2663	case IP_VERSION(13, 0, 10):
2664		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2665		break;
2666	case IP_VERSION(13, 0, 2):
2667		adev->smuio.funcs = &smuio_v13_0_funcs;
2668		break;
2669	case IP_VERSION(13, 0, 3):
2670		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2671		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2672			adev->flags |= AMD_IS_APU;
2673		}
2674		break;
2675	case IP_VERSION(13, 0, 6):
2676	case IP_VERSION(13, 0, 8):
2677	case IP_VERSION(14, 0, 0):
2678	case IP_VERSION(14, 0, 1):
2679		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2680		break;
2681	default:
2682		break;
2683	}
2684
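	/* set LSDMA callbacks based on the LSDMA IP version */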
2685	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2686	case IP_VERSION(6, 0, 0):
2687	case IP_VERSION(6, 0, 1):
2688	case IP_VERSION(6, 0, 2):
2689	case IP_VERSION(6, 0, 3):
2690		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2691		break;
2692	case IP_VERSION(7, 0, 0):
2693	case IP_VERSION(7, 0, 1):
2694		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2695		break;
2696	default:
2697		break;
2698	}
2699
2700	r = amdgpu_discovery_set_common_ip_blocks(adev);
2701	if (r)
2702		return r;
2703
2704	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2705	if (r)
2706		return r;
2707
2708	/* For SR-IOV, PSP needs to be initialized before IH */
2709	if (amdgpu_sriov_vf(adev)) {
2710		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2711		if (r)
2712			return r;
2713		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2714		if (r)
2715			return r;
2716	} else {
2717		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2718		if (r)
2719			return r;
2720
2721		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2722			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2723			if (r)
2724				return r;
2725		}
2726	}
2727
2728	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2729		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2730		if (r)
2731			return r;
2732	}
2733
2734	r = amdgpu_discovery_set_display_ip_blocks(adev);
2735	if (r)
2736		return r;
2737
2738	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2739	if (r)
2740		return r;
2741
2742	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2743	if (r)
2744		return r;
2745
2746	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2747	     !amdgpu_sriov_vf(adev)) ||
2748	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2749		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2750		if (r)
2751			return r;
2752	}
2753
2754	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2755	if (r)
2756		return r;
2757
2758	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2759	if (r)
2760		return r;
2761
2762	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2763	if (r)
2764		return r;
2765
2766	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2767	if (r)
2768		return r;
2769
2770	return 0;
2771}
2772
v6.13.7
   1/*
   2 * Copyright 2018 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25
  26#include "amdgpu.h"
  27#include "amdgpu_discovery.h"
  28#include "soc15_hw_ip.h"
  29#include "discovery.h"
  30#include "amdgpu_ras.h"
  31
  32#include "soc15.h"
  33#include "gfx_v9_0.h"
  34#include "gfx_v9_4_3.h"
  35#include "gmc_v9_0.h"
  36#include "df_v1_7.h"
  37#include "df_v3_6.h"
  38#include "df_v4_3.h"
  39#include "df_v4_6_2.h"
  40#include "df_v4_15.h"
  41#include "nbio_v6_1.h"
  42#include "nbio_v7_0.h"
  43#include "nbio_v7_4.h"
  44#include "nbio_v7_9.h"
  45#include "nbio_v7_11.h"
  46#include "hdp_v4_0.h"
  47#include "vega10_ih.h"
  48#include "vega20_ih.h"
  49#include "sdma_v4_0.h"
  50#include "sdma_v4_4_2.h"
  51#include "uvd_v7_0.h"
  52#include "vce_v4_0.h"
  53#include "vcn_v1_0.h"
  54#include "vcn_v2_5.h"
  55#include "jpeg_v2_5.h"
  56#include "smuio_v9_0.h"
  57#include "gmc_v10_0.h"
  58#include "gmc_v11_0.h"
  59#include "gmc_v12_0.h"
  60#include "gfxhub_v2_0.h"
  61#include "mmhub_v2_0.h"
  62#include "nbio_v2_3.h"
  63#include "nbio_v4_3.h"
  64#include "nbio_v7_2.h"
  65#include "nbio_v7_7.h"
  66#include "nbif_v6_3_1.h"
  67#include "hdp_v5_0.h"
  68#include "hdp_v5_2.h"
  69#include "hdp_v6_0.h"
  70#include "hdp_v7_0.h"
  71#include "nv.h"
  72#include "soc21.h"
  73#include "soc24.h"
  74#include "navi10_ih.h"
  75#include "ih_v6_0.h"
  76#include "ih_v6_1.h"
  77#include "ih_v7_0.h"
  78#include "gfx_v10_0.h"
  79#include "gfx_v11_0.h"
  80#include "gfx_v12_0.h"
  81#include "sdma_v5_0.h"
  82#include "sdma_v5_2.h"
  83#include "sdma_v6_0.h"
  84#include "sdma_v7_0.h"
  85#include "lsdma_v6_0.h"
  86#include "lsdma_v7_0.h"
  87#include "vcn_v2_0.h"
  88#include "jpeg_v2_0.h"
  89#include "vcn_v3_0.h"
  90#include "jpeg_v3_0.h"
  91#include "vcn_v4_0.h"
  92#include "jpeg_v4_0.h"
  93#include "vcn_v4_0_3.h"
  94#include "jpeg_v4_0_3.h"
  95#include "vcn_v4_0_5.h"
  96#include "jpeg_v4_0_5.h"
  97#include "amdgpu_vkms.h"
 
  98#include "mes_v11_0.h"
  99#include "mes_v12_0.h"
 100#include "smuio_v11_0.h"
 101#include "smuio_v11_0_6.h"
 102#include "smuio_v13_0.h"
 103#include "smuio_v13_0_3.h"
 104#include "smuio_v13_0_6.h"
 105#include "smuio_v14_0_2.h"
 106#include "vcn_v5_0_0.h"
 107#include "jpeg_v5_0_0.h"
 108
 109#include "amdgpu_vpe.h"
 110#if defined(CONFIG_DRM_AMD_ISP)
 111#include "amdgpu_isp.h"
 112#endif
 113
 114#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
 115MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
 116
 117#define mmIP_DISCOVERY_VERSION  0x16A00
 118#define mmRCC_CONFIG_MEMSIZE	0xde3
 119#define mmMP0_SMN_C2PMSG_33	0x16061
 120#define mmMM_INDEX		0x0
 121#define mmMM_INDEX_HI		0x6
 122#define mmMM_DATA		0x1
 123
 124static const char *hw_id_names[HW_ID_MAX] = {
 125	[MP1_HWID]		= "MP1",
 126	[MP2_HWID]		= "MP2",
 127	[THM_HWID]		= "THM",
 128	[SMUIO_HWID]		= "SMUIO",
 129	[FUSE_HWID]		= "FUSE",
 130	[CLKA_HWID]		= "CLKA",
 131	[PWR_HWID]		= "PWR",
 132	[GC_HWID]		= "GC",
 133	[UVD_HWID]		= "UVD",
 134	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
 135	[ACP_HWID]		= "ACP",
 136	[DCI_HWID]		= "DCI",
 137	[DMU_HWID]		= "DMU",
 138	[DCO_HWID]		= "DCO",
 139	[DIO_HWID]		= "DIO",
 140	[XDMA_HWID]		= "XDMA",
 141	[DCEAZ_HWID]		= "DCEAZ",
 142	[DAZ_HWID]		= "DAZ",
 143	[SDPMUX_HWID]		= "SDPMUX",
 144	[NTB_HWID]		= "NTB",
 145	[IOHC_HWID]		= "IOHC",
 146	[L2IMU_HWID]		= "L2IMU",
 147	[VCE_HWID]		= "VCE",
 148	[MMHUB_HWID]		= "MMHUB",
 149	[ATHUB_HWID]		= "ATHUB",
 150	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
 151	[DFX_HWID]		= "DFX",
 152	[DBGU0_HWID]		= "DBGU0",
 153	[DBGU1_HWID]		= "DBGU1",
 154	[OSSSYS_HWID]		= "OSSSYS",
 155	[HDP_HWID]		= "HDP",
 156	[SDMA0_HWID]		= "SDMA0",
 157	[SDMA1_HWID]		= "SDMA1",
 158	[SDMA2_HWID]		= "SDMA2",
 159	[SDMA3_HWID]		= "SDMA3",
 160	[LSDMA_HWID]		= "LSDMA",
 161	[ISP_HWID]		= "ISP",
 162	[DBGU_IO_HWID]		= "DBGU_IO",
 163	[DF_HWID]		= "DF",
 164	[CLKB_HWID]		= "CLKB",
 165	[FCH_HWID]		= "FCH",
 166	[DFX_DAP_HWID]		= "DFX_DAP",
 167	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
 168	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
 169	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
 170	[L1IMU3_HWID]		= "L1IMU3",
 171	[L1IMU4_HWID]		= "L1IMU4",
 172	[L1IMU5_HWID]		= "L1IMU5",
 173	[L1IMU6_HWID]		= "L1IMU6",
 174	[L1IMU7_HWID]		= "L1IMU7",
 175	[L1IMU8_HWID]		= "L1IMU8",
 176	[L1IMU9_HWID]		= "L1IMU9",
 177	[L1IMU10_HWID]		= "L1IMU10",
 178	[L1IMU11_HWID]		= "L1IMU11",
 179	[L1IMU12_HWID]		= "L1IMU12",
 180	[L1IMU13_HWID]		= "L1IMU13",
 181	[L1IMU14_HWID]		= "L1IMU14",
 182	[L1IMU15_HWID]		= "L1IMU15",
 183	[WAFLC_HWID]		= "WAFLC",
 184	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
 185	[PCIE_HWID]		= "PCIE",
 186	[PCS_HWID]		= "PCS",
 187	[DDCL_HWID]		= "DDCL",
 188	[SST_HWID]		= "SST",
 189	[IOAGR_HWID]		= "IOAGR",
 190	[NBIF_HWID]		= "NBIF",
 191	[IOAPIC_HWID]		= "IOAPIC",
 192	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
 193	[NTBCCP_HWID]		= "NTBCCP",
 194	[UMC_HWID]		= "UMC",
 195	[SATA_HWID]		= "SATA",
 196	[USB_HWID]		= "USB",
 197	[CCXSEC_HWID]		= "CCXSEC",
 198	[XGMI_HWID]		= "XGMI",
 199	[XGBE_HWID]		= "XGBE",
 200	[MP0_HWID]		= "MP0",
 201	[VPE_HWID]		= "VPE",
 202};
 203
 204static int hw_id_map[MAX_HWIP] = {
 205	[GC_HWIP]	= GC_HWID,
 206	[HDP_HWIP]	= HDP_HWID,
 207	[SDMA0_HWIP]	= SDMA0_HWID,
 208	[SDMA1_HWIP]	= SDMA1_HWID,
 209	[SDMA2_HWIP]    = SDMA2_HWID,
 210	[SDMA3_HWIP]    = SDMA3_HWID,
 211	[LSDMA_HWIP]    = LSDMA_HWID,
 212	[MMHUB_HWIP]	= MMHUB_HWID,
 213	[ATHUB_HWIP]	= ATHUB_HWID,
 214	[NBIO_HWIP]	= NBIF_HWID,
 215	[MP0_HWIP]	= MP0_HWID,
 216	[MP1_HWIP]	= MP1_HWID,
 217	[UVD_HWIP]	= UVD_HWID,
 218	[VCE_HWIP]	= VCE_HWID,
 219	[DF_HWIP]	= DF_HWID,
 220	[DCE_HWIP]	= DMU_HWID,
 221	[OSSSYS_HWIP]	= OSSSYS_HWID,
 222	[SMUIO_HWIP]	= SMUIO_HWID,
 223	[PWR_HWIP]	= PWR_HWID,
 224	[NBIF_HWIP]	= NBIF_HWID,
 225	[THM_HWIP]	= THM_HWID,
 226	[CLK_HWIP]	= CLKA_HWID,
 227	[UMC_HWIP]	= UMC_HWID,
 228	[XGMI_HWIP]	= XGMI_HWID,
 229	[DCI_HWIP]	= DCI_HWID,
 230	[PCIE_HWIP]	= PCIE_HWID,
 231	[VPE_HWIP]	= VPE_HWID,
 232	[ISP_HWIP]	= ISP_HWID,
 233};
 234
 235static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
 236{
 237	u64 tmr_offset, tmr_size, pos;
 238	void *discv_regn;
 239	int ret;
 240
 241	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
 242	if (ret)
 243		return ret;
 244
 245	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
 246
 247	/* This region is read-only and reserved from system use */
 248	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
 249	if (discv_regn) {
 250		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
 251		memunmap(discv_regn);
 252		return 0;
 253	}
 254
 255	return -ENOENT;
 256}
 257
 258#define IP_DISCOVERY_V2		2
 259#define IP_DISCOVERY_V4		4
 260
 261static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
 262						 uint8_t *binary)
 263{
 264	uint64_t vram_size;
 265	u32 msg;
 266	int i, ret = 0;
 267
 268	if (!amdgpu_sriov_vf(adev)) {
 269		/* It can take up to a second for IFWI init to complete on some dGPUs,
 270		 * but generally it should be in the 60-100ms range.  Normally this starts
 271		 * as soon as the device gets power so by the time the OS loads this has long
 272		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
 273		 * wait for this to complete.  Once the C2PMSG is updated, we can
 274		 * continue.
 275		 */
 276
 277		for (i = 0; i < 1000; i++) {
 278			msg = RREG32(mmMP0_SMN_C2PMSG_33);
 279			if (msg & 0x80000000)
 280				break;
 281			msleep(1);
 282		}
 283	}
 284
 285	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
 286
 287	if (vram_size) {
 288		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
 289		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
 290					  adev->mman.discovery_tmr_size, false);
 291	} else {
 292		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
 293	}
 294
 295	return ret;
 296}
 297
 298static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
 299{
 300	const struct firmware *fw;
 301	const char *fw_name;
 302	int r;
 303
 304	switch (amdgpu_discovery) {
 305	case 2:
 306		fw_name = FIRMWARE_IP_DISCOVERY;
 307		break;
 308	default:
 309		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
 310		return -EINVAL;
 311	}
 312
 313	r = request_firmware(&fw, fw_name, adev->dev);
 314	if (r) {
 315		dev_err(adev->dev, "can't load firmware \"%s\"\n",
 316			fw_name);
 317		return r;
 318	}
 319
 320	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
 321	release_firmware(fw);
 322
 323	return 0;
 324}
 325
 326static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
 327{
 328	uint16_t checksum = 0;
 329	int i;
 330
 331	for (i = 0; i < size; i++)
 332		checksum += data[i];
 333
 334	return checksum;
 335}
 336
 337static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
 338						    uint16_t expected)
 339{
 340	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
 341}
 342
 343static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
 344{
 345	struct binary_header *bhdr;
 346	bhdr = (struct binary_header *)binary;
 347
 348	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
 349}
 350
 351static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
 352{
 353	/*
 354	 * So far, apply this quirk only on those Navy Flounder boards which
 355	 * have a bad harvest table of VCN config.
 356	 */
 357	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
 358	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
 359		switch (adev->pdev->revision) {
 360		case 0xC1:
 361		case 0xC2:
 362		case 0xC3:
 363		case 0xC5:
 364		case 0xC7:
 365		case 0xCF:
 366		case 0xDF:
 367			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
 368			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
 369			break;
 370		default:
 371			break;
 372		}
 373	}
 374}
 375
 376static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
 377					   struct binary_header *bhdr)
 378{
 379	struct table_info *info;
 380	uint16_t checksum;
 381	uint16_t offset;
 382
 383	info = &bhdr->table_list[NPS_INFO];
 384	offset = le16_to_cpu(info->offset);
 385	checksum = le16_to_cpu(info->checksum);
 386
 387	struct nps_info_header *nhdr =
 388		(struct nps_info_header *)(adev->mman.discovery_bin + offset);
 389
 390	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
 391		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
 392		return -EINVAL;
 393	}
 394
 395	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 396					      le32_to_cpu(nhdr->size_bytes),
 397					      checksum)) {
 398		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
 399		return -EINVAL;
 400	}
 401
 402	return 0;
 403}
 404
 405static int amdgpu_discovery_init(struct amdgpu_device *adev)
 406{
 407	struct table_info *info;
 408	struct binary_header *bhdr;
 409	uint16_t offset;
 410	uint16_t size;
 411	uint16_t checksum;
 412	int r;
 413
 414	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
 415	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
 416	if (!adev->mman.discovery_bin)
 417		return -ENOMEM;
 418
 419	/* Read from file if it is the preferred option */
 420	if (amdgpu_discovery == 2) {
 421		dev_info(adev->dev, "use ip discovery information from file");
 422		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
 423
 424		if (r) {
 425			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
 426			r = -EINVAL;
 427			goto out;
 428		}
 429
 430	} else {
 431		r = amdgpu_discovery_read_binary_from_mem(
 432			adev, adev->mman.discovery_bin);
 433		if (r)
 434			goto out;
 435	}
 436
 437	/* check the ip discovery binary signature */
 438	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
 439		dev_err(adev->dev,
 440			"get invalid ip discovery binary signature\n");
 441		r = -EINVAL;
 442		goto out;
 443	}
 444
 445	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 446
 447	offset = offsetof(struct binary_header, binary_checksum) +
 448		sizeof(bhdr->binary_checksum);
 449	size = le16_to_cpu(bhdr->binary_size) - offset;
 450	checksum = le16_to_cpu(bhdr->binary_checksum);
 451
 452	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 453					      size, checksum)) {
 454		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
 455		r = -EINVAL;
 456		goto out;
 457	}
 458
 459	info = &bhdr->table_list[IP_DISCOVERY];
 460	offset = le16_to_cpu(info->offset);
 461	checksum = le16_to_cpu(info->checksum);
 462
 463	if (offset) {
 464		struct ip_discovery_header *ihdr =
 465			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
 466		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
 467			dev_err(adev->dev, "invalid ip discovery data table signature\n");
 468			r = -EINVAL;
 469			goto out;
 470		}
 471
 472		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 473						      le16_to_cpu(ihdr->size), checksum)) {
 474			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
 475			r = -EINVAL;
 476			goto out;
 477		}
 478	}
 479
 480	info = &bhdr->table_list[GC];
 481	offset = le16_to_cpu(info->offset);
 482	checksum = le16_to_cpu(info->checksum);
 483
 484	if (offset) {
 485		struct gpu_info_header *ghdr =
 486			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
 487
 488		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
 489			dev_err(adev->dev, "invalid ip discovery gc table id\n");
 490			r = -EINVAL;
 491			goto out;
 492		}
 493
 494		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 495						      le32_to_cpu(ghdr->size), checksum)) {
 496			dev_err(adev->dev, "invalid gc data table checksum\n");
 497			r = -EINVAL;
 498			goto out;
 499		}
 500	}
 501
 502	info = &bhdr->table_list[HARVEST_INFO];
 503	offset = le16_to_cpu(info->offset);
 504	checksum = le16_to_cpu(info->checksum);
 505
 506	if (offset) {
 507		struct harvest_info_header *hhdr =
 508			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
 509
 510		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
 511			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
 512			r = -EINVAL;
 513			goto out;
 514		}
 515
 516		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 517						      sizeof(struct harvest_table), checksum)) {
 518			dev_err(adev->dev, "invalid harvest data table checksum\n");
 519			r = -EINVAL;
 520			goto out;
 521		}
 522	}
 523
 524	info = &bhdr->table_list[VCN_INFO];
 525	offset = le16_to_cpu(info->offset);
 526	checksum = le16_to_cpu(info->checksum);
 527
 528	if (offset) {
 529		struct vcn_info_header *vhdr =
 530			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
 531
 532		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
 533			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
 534			r = -EINVAL;
 535			goto out;
 536		}
 537
 538		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 539						      le32_to_cpu(vhdr->size_bytes), checksum)) {
 540			dev_err(adev->dev, "invalid vcn data table checksum\n");
 541			r = -EINVAL;
 542			goto out;
 543		}
 544	}
 545
 546	info = &bhdr->table_list[MALL_INFO];
 547	offset = le16_to_cpu(info->offset);
 548	checksum = le16_to_cpu(info->checksum);
 549
 550	if (0 && offset) {
 551		struct mall_info_header *mhdr =
 552			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
 553
 554		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
 555			dev_err(adev->dev, "invalid ip discovery mall table id\n");
 556			r = -EINVAL;
 557			goto out;
 558		}
 559
 560		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 561						      le32_to_cpu(mhdr->size_bytes), checksum)) {
 562			dev_err(adev->dev, "invalid mall data table checksum\n");
 563			r = -EINVAL;
 564			goto out;
 565		}
 566	}
 567
 568	return 0;
 569
 570out:
 571	kfree(adev->mman.discovery_bin);
 572	adev->mman.discovery_bin = NULL;
 573	if ((amdgpu_discovery != 2) &&
 574	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
 575		amdgpu_ras_query_boot_status(adev, 4);
 576	return r;
 577}
 578
 579static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
 580
 581void amdgpu_discovery_fini(struct amdgpu_device *adev)
 582{
 583	amdgpu_discovery_sysfs_fini(adev);
 584	kfree(adev->mman.discovery_bin);
 585	adev->mman.discovery_bin = NULL;
 586}
 587
 588static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
 589{
 590	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
 591		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
 592			  ip->instance_number);
 593		return -EINVAL;
 594	}
 595	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
 596		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
 597			  le16_to_cpu(ip->hw_id));
 598		return -EINVAL;
 599	}
 600
 601	return 0;
 602}
 603
 604static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
 605						uint32_t *vcn_harvest_count)
 606{
 607	struct binary_header *bhdr;
 608	struct ip_discovery_header *ihdr;
 609	struct die_header *dhdr;
 610	struct ip_v4 *ip;
 611	uint16_t die_offset, ip_offset, num_dies, num_ips;
 612	int i, j;
 613
 614	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 615	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
 616			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
 617	num_dies = le16_to_cpu(ihdr->num_dies);
 618
 619	/* scan harvest bit of all IP data structures */
 620	for (i = 0; i < num_dies; i++) {
 621		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
 622		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
 623		num_ips = le16_to_cpu(dhdr->num_ips);
 624		ip_offset = die_offset + sizeof(*dhdr);
 625
 626		for (j = 0; j < num_ips; j++) {
 627			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
 628
 629			if (amdgpu_discovery_validate_ip(ip))
 630				goto next_ip;
 631
 632			if (le16_to_cpu(ip->variant) == 1) {
 633				switch (le16_to_cpu(ip->hw_id)) {
 634				case VCN_HWID:
 635					(*vcn_harvest_count)++;
 636					if (ip->instance_number == 0) {
 637						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
 638						adev->vcn.inst_mask &=
 639							~AMDGPU_VCN_HARVEST_VCN0;
 640						adev->jpeg.inst_mask &=
 641							~AMDGPU_VCN_HARVEST_VCN0;
 642					} else {
 643						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
 644						adev->vcn.inst_mask &=
 645							~AMDGPU_VCN_HARVEST_VCN1;
 646						adev->jpeg.inst_mask &=
 647							~AMDGPU_VCN_HARVEST_VCN1;
 648					}
 649					break;
 650				case DMU_HWID:
 651					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 652					break;
 653				default:
 654					break;
 655				}
 656			}
 657next_ip:
 658			if (ihdr->base_addr_64_bit)
 659				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
 660			else
 661				ip_offset += struct_size(ip, base_address, ip->num_base_address);
 662		}
 663	}
 664}
 665
 666static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
 667						     uint32_t *vcn_harvest_count,
 668						     uint32_t *umc_harvest_count)
 669{
 670	struct binary_header *bhdr;
 671	struct harvest_table *harvest_info;
 672	u16 offset;
 673	int i;
 674	uint32_t umc_harvest_config = 0;
 675
 676	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 677	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
 678
 679	if (!offset) {
 680		dev_err(adev->dev, "invalid harvest table offset\n");
 681		return;
 682	}
 683
 684	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
 685
 686	for (i = 0; i < 32; i++) {
 687		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
 688			break;
 689
 690		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
 691		case VCN_HWID:
 692			(*vcn_harvest_count)++;
 693			adev->vcn.harvest_config |=
 694				(1 << harvest_info->list[i].number_instance);
 695			adev->jpeg.harvest_config |=
 696				(1 << harvest_info->list[i].number_instance);
 697
 698			adev->vcn.inst_mask &=
 699				~(1U << harvest_info->list[i].number_instance);
 700			adev->jpeg.inst_mask &=
 701				~(1U << harvest_info->list[i].number_instance);
 702			break;
 703		case DMU_HWID:
 704			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 705			break;
 706		case UMC_HWID:
 707			umc_harvest_config |=
 708				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
 709			(*umc_harvest_count)++;
 710			break;
 711		case GC_HWID:
 712			adev->gfx.xcc_mask &=
 713				~(1U << harvest_info->list[i].number_instance);
 714			break;
 715		case SDMA0_HWID:
 716			adev->sdma.sdma_mask &=
 717				~(1U << harvest_info->list[i].number_instance);
 718			break;
 719#if defined(CONFIG_DRM_AMD_ISP)
 720		case ISP_HWID:
 721			adev->isp.harvest_config |=
 722				(1U << harvest_info->list[i].number_instance);
 723			break;
 724#endif
 725		default:
 726			break;
 727		}
 728	}
 729
 730	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
 731				~umc_harvest_config;
 732}
 733
 734/* ================================================== */
 735
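/* sysfs layout exposed by the structures below, relative to the device's
 * sysfs directory:
 *
 *   ip_discovery/die/<die>/<hw_id>/<instance>/{hw_id,major,minor,...}
 *
 * ip_discovery_top owns the "die" kset, each ip_die_entry is a kset of
 * ip_hw_id ksets, and each ip_hw_id holds one kobject per ip_hw_instance.
 */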
 736struct ip_hw_instance {
 737	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
 738
 739	int hw_id;
 740	u8  num_instance;
 741	u8  major, minor, revision;
 742	u8  harvest;
 743
 744	int num_base_addresses;
 745	u32 base_addr[] __counted_by(num_base_addresses);
 746};
 747
 748struct ip_hw_id {
 749	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
 750	int hw_id;
 751};
 752
 753struct ip_die_entry {
 754	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
 755	u16 num_ips;
 756};
 757
 758/* -------------------------------------------------- */
 759
 760struct ip_hw_instance_attr {
 761	struct attribute attr;
 762	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
 763};
 764
 765static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 766{
 767	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
 768}
 769
 770static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 771{
 772	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
 773}
 774
 775static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 776{
 777	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
 778}
 779
 780static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 781{
 782	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
 783}
 784
 785static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 786{
 787	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
 788}
 789
 790static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 791{
 792	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
 793}
 794
 795static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 796{
 797	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
 798}
 799
 800static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
 801{
 802	ssize_t res, at;
 803	int ii;
 804
 805	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
 806		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
 807		 */
 808		if (at + 12 > PAGE_SIZE)
 809			break;
 810		res = sysfs_emit_at(buf, at, "0x%08X\n",
 811				    ip_hw_instance->base_addr[ii]);
 812		if (res <= 0)
 813			break;
 814		at += res;
 815	}
 816
 817	return res < 0 ? res : at;
 818}
 819
 820static struct ip_hw_instance_attr ip_hw_attr[] = {
 821	__ATTR_RO(hw_id),
 822	__ATTR_RO(num_instance),
 823	__ATTR_RO(major),
 824	__ATTR_RO(minor),
 825	__ATTR_RO(revision),
 826	__ATTR_RO(harvest),
 827	__ATTR_RO(num_base_addresses),
 828	__ATTR_RO(base_addr),
 829};
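/* Example of reading one attribute from user space (the card path and
 * the value shown are illustrative):
 *
 *   $ cat /sys/class/drm/card0/device/ip_discovery/die/0/GC/0/major
 *   10
 */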
 830
 831static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
 832ATTRIBUTE_GROUPS(ip_hw_instance);
 833
 834#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
 835#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
 836
 837static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
 838					struct attribute *attr,
 839					char *buf)
 840{
 841	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
 842	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
 843
 844	if (!ip_hw_attr->show)
 845		return -EIO;
 846
 847	return ip_hw_attr->show(ip_hw_instance, buf);
 848}
 849
 850static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
 851	.show = ip_hw_instance_attr_show,
 852};
 853
 854static void ip_hw_instance_release(struct kobject *kobj)
 855{
 856	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
 857
 858	kfree(ip_hw_instance);
 859}
 860
 861static const struct kobj_type ip_hw_instance_ktype = {
 862	.release = ip_hw_instance_release,
 863	.sysfs_ops = &ip_hw_instance_sysfs_ops,
 864	.default_groups = ip_hw_instance_groups,
 865};
 866
 867/* -------------------------------------------------- */
 868
 869#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
 870
 871static void ip_hw_id_release(struct kobject *kobj)
 872{
 873	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
 874
 875	if (!list_empty(&ip_hw_id->hw_id_kset.list))
 876		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
 877	kfree(ip_hw_id);
 878}
 879
 880static const struct kobj_type ip_hw_id_ktype = {
 881	.release = ip_hw_id_release,
 882	.sysfs_ops = &kobj_sysfs_ops,
 883};
 884
 885/* -------------------------------------------------- */
 886
 887static void die_kobj_release(struct kobject *kobj);
 888static void ip_disc_release(struct kobject *kobj);
 889
 890struct ip_die_entry_attribute {
 891	struct attribute attr;
 892	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
 893};
 894
 895#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
 896
 897static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
 898{
 899	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
 900}
 901
 902/* If there are more ip_die_entry attrs, other than the number of IPs,
 903	 * we can make this into an array of attrs, and then initialize
 904 * ip_die_entry_attrs in a loop.
 905 */
 906static struct ip_die_entry_attribute num_ips_attr =
 907	__ATTR_RO(num_ips);
 908
 909static struct attribute *ip_die_entry_attrs[] = {
 910	&num_ips_attr.attr,
 911	NULL,
 912};
 913ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
 914
 915#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
 916
 917static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
 918				      struct attribute *attr,
 919				      char *buf)
 920{
 921	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
 922	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
 923
 924	if (!ip_die_entry_attr->show)
 925		return -EIO;
 926
 927	return ip_die_entry_attr->show(ip_die_entry, buf);
 928}
 929
 930static void ip_die_entry_release(struct kobject *kobj)
 931{
 932	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
 933
 934	if (!list_empty(&ip_die_entry->ip_kset.list))
 935		DRM_ERROR("ip_die_entry->ip_kset is not empty");
 936	kfree(ip_die_entry);
 937}
 938
 939static const struct sysfs_ops ip_die_entry_sysfs_ops = {
 940	.show = ip_die_entry_attr_show,
 941};
 942
 943static const struct kobj_type ip_die_entry_ktype = {
 944	.release = ip_die_entry_release,
 945	.sysfs_ops = &ip_die_entry_sysfs_ops,
 946	.default_groups = ip_die_entry_groups,
 947};
 948
 949static const struct kobj_type die_kobj_ktype = {
 950	.release = die_kobj_release,
 951	.sysfs_ops = &kobj_sysfs_ops,
 952};
 953
 954static const struct kobj_type ip_discovery_ktype = {
 955	.release = ip_disc_release,
 956	.sysfs_ops = &kobj_sysfs_ops,
 957};
 958
 959struct ip_discovery_top {
 960	struct kobject kobj;    /* ip_discovery/ */
 961	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
 962	struct amdgpu_device *adev;
 963};
 964
 965static void die_kobj_release(struct kobject *kobj)
 966{
 967	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
 968						       struct ip_discovery_top,
 969						       die_kset);
 970	if (!list_empty(&ip_top->die_kset.list))
 971		DRM_ERROR("ip_top->die_kset is not empty");
 972}
 973
 974static void ip_disc_release(struct kobject *kobj)
 975{
 976	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
 977						       kobj);
 978	struct amdgpu_device *adev = ip_top->adev;
 979
 980	adev->ip_top = NULL;
 981	kfree(ip_top);
 982}
 983
 984static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
 985						 uint16_t hw_id, uint8_t inst)
 986{
 987	uint8_t harvest = 0;
 988
 989	/* Until a uniform way is figured out, get the mask based on hw_id */
 990	switch (hw_id) {
 991	case VCN_HWID:
 992		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
 993		break;
 994	case DMU_HWID:
 995		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
 996			harvest = 0x1;
 997		break;
 998	case UMC_HWID:
 999		/* TODO: UMC harvest info needs separate parsing; ignore for now. */
1000		break;
1001	case GC_HWID:
1002		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1003		break;
1004	case SDMA0_HWID:
1005		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1006		break;
1007	default:
1008		break;
1009	}
1010
1011	return harvest;
1012}
1013
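/* For a single die, group all IP entries by hardware ID: register one
 * kset per hw_id (plus a human-readable symlink from hw_id_names[]) and
 * one kobject per instance carrying the attributes defined above.
 */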
1014static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1015				      struct ip_die_entry *ip_die_entry,
1016				      const size_t _ip_offset, const int num_ips,
1017				      bool reg_base_64)
1018{
1019	int ii, jj, kk, res;
1020
1021	DRM_DEBUG("num_ips:%d", num_ips);
1022
1023	/* Find all IPs of a given HW ID, and add their instance to
1024	 * #die/#hw_id/#instance/<attributes>
1025	 */
1026	for (ii = 0; ii < HW_ID_MAX; ii++) {
1027		struct ip_hw_id *ip_hw_id = NULL;
1028		size_t ip_offset = _ip_offset;
1029
1030		for (jj = 0; jj < num_ips; jj++) {
1031			struct ip_v4 *ip;
1032			struct ip_hw_instance *ip_hw_instance;
1033
1034			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1035			if (amdgpu_discovery_validate_ip(ip) ||
1036			    le16_to_cpu(ip->hw_id) != ii)
1037				goto next_ip;
1038
1039			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1040
1041			/* We have a hw_id match; register the hw
1042			 * block if not yet registered.
1043			 */
1044			if (!ip_hw_id) {
1045				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1046				if (!ip_hw_id)
1047					return -ENOMEM;
1048				ip_hw_id->hw_id = ii;
1049
1050				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1051				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1052				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1053				res = kset_register(&ip_hw_id->hw_id_kset);
1054				if (res) {
1055					DRM_ERROR("Couldn't register ip_hw_id kset");
1056					kfree(ip_hw_id);
1057					return res;
1058				}
1059				if (hw_id_names[ii]) {
1060					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1061								&ip_hw_id->hw_id_kset.kobj,
1062								hw_id_names[ii]);
1063					if (res) {
1064						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1065							  hw_id_names[ii],
1066							  kobject_name(&ip_die_entry->ip_kset.kobj));
1067					}
1068				}
1069			}
1070
1071			/* Now register its instance.
1072			 */
1073			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1074							     base_addr,
1075							     ip->num_base_address),
1076						 GFP_KERNEL);
1077			if (!ip_hw_instance) {
1078				DRM_ERROR("no memory for ip_hw_instance");
1079				return -ENOMEM;
1080			}
1081			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1082			ip_hw_instance->num_instance = ip->instance_number;
1083			ip_hw_instance->major = ip->major;
1084			ip_hw_instance->minor = ip->minor;
1085			ip_hw_instance->revision = ip->revision;
1086			ip_hw_instance->harvest =
1087				amdgpu_discovery_get_harvest_info(
1088					adev, ip_hw_instance->hw_id,
1089					ip_hw_instance->num_instance);
1090			ip_hw_instance->num_base_addresses = ip->num_base_address;
1091
1092			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1093				if (reg_base_64)
1094					ip_hw_instance->base_addr[kk] =
1095						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1096				else
1097					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1098			}
1099
1100			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1101			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1102			res = kobject_add(&ip_hw_instance->kobj, NULL,
1103					  "%d", ip_hw_instance->num_instance);
1104next_ip:
1105			if (reg_base_64)
1106				ip_offset += struct_size(ip, base_address_64,
1107							 ip->num_base_address);
1108			else
1109				ip_offset += struct_size(ip, base_address,
1110							 ip->num_base_address);
1111		}
1112	}
1113
1114	return 0;
1115}
1116
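/* Iterate over all dies in the discovery binary, register one kset per
 * die under ip_discovery/die/ and populate it via
 * amdgpu_discovery_sysfs_ips().
 */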
1117static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1118{
1119	struct binary_header *bhdr;
1120	struct ip_discovery_header *ihdr;
1121	struct die_header *dhdr;
1122	struct kset *die_kset = &adev->ip_top->die_kset;
1123	u16 num_dies, die_offset, num_ips;
1124	size_t ip_offset;
1125	int ii, res;
1126
1127	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1128	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1129					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1130	num_dies = le16_to_cpu(ihdr->num_dies);
1131
1132	DRM_DEBUG("number of dies: %d\n", num_dies);
1133
1134	for (ii = 0; ii < num_dies; ii++) {
1135		struct ip_die_entry *ip_die_entry;
1136
1137		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1138		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1139		num_ips = le16_to_cpu(dhdr->num_ips);
1140		ip_offset = die_offset + sizeof(*dhdr);
1141
1142		/* Add the die to the kset.
1143		 *
1144		 * dhdr->die_id == ii, which was checked in
1145		 * amdgpu_discovery_reg_base_init().
1146		 */
1147
1148		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1149		if (!ip_die_entry)
1150			return -ENOMEM;
1151
1152		ip_die_entry->num_ips = num_ips;
1153
1154		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1155		ip_die_entry->ip_kset.kobj.kset = die_kset;
1156		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1157		res = kset_register(&ip_die_entry->ip_kset);
1158		if (res) {
1159			DRM_ERROR("Couldn't register ip_die_entry kset");
1160			kfree(ip_die_entry);
1161			return res;
1162		}
1163
1164		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1165	}
1166
1167	return 0;
1168}
1169
1170static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1171{
1172	struct kset *die_kset;
1173	int res, ii;
1174
1175	if (!adev->mman.discovery_bin)
1176		return -EINVAL;
1177
1178	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1179	if (!adev->ip_top)
1180		return -ENOMEM;
1181
1182	adev->ip_top->adev = adev;
1183
1184	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1185				   &adev->dev->kobj, "ip_discovery");
1186	if (res) {
1187		DRM_ERROR("Couldn't init and add ip_discovery/");
1188		goto Err;
1189	}
1190
1191	die_kset = &adev->ip_top->die_kset;
1192	kobject_set_name(&die_kset->kobj, "%s", "die");
1193	die_kset->kobj.parent = &adev->ip_top->kobj;
1194	die_kset->kobj.ktype = &die_kobj_ktype;
1195	res = kset_register(&adev->ip_top->die_kset);
1196	if (res) {
1197		DRM_ERROR("Couldn't register die_kset");
1198		goto Err;
1199	}
1200
1201	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1202		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1203	ip_hw_instance_attrs[ii] = NULL;
1204
1205	res = amdgpu_discovery_sysfs_recurse(adev);
1206
1207	return res;
1208Err:
1209	kobject_put(&adev->ip_top->kobj);
1210	return res;
1211}
1212
1213/* -------------------------------------------------- */
1214
1215#define list_to_kobj(el) container_of(el, struct kobject, entry)
1216
1217static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1218{
1219	struct list_head *el, *tmp;
1220	struct kset *hw_id_kset;
1221
1222	hw_id_kset = &ip_hw_id->hw_id_kset;
1223	spin_lock(&hw_id_kset->list_lock);
1224	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1225		list_del_init(el);
1226		spin_unlock(&hw_id_kset->list_lock);
1227		/* kobject is embedded in ip_hw_instance */
1228		kobject_put(list_to_kobj(el));
1229		spin_lock(&hw_id_kset->list_lock);
1230	}
1231	spin_unlock(&hw_id_kset->list_lock);
1232	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1233}
1234
1235static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1236{
1237	struct list_head *el, *tmp;
1238	struct kset *ip_kset;
1239
1240	ip_kset = &ip_die_entry->ip_kset;
1241	spin_lock(&ip_kset->list_lock);
1242	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1243		list_del_init(el);
1244		spin_unlock(&ip_kset->list_lock);
1245		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1246		spin_lock(&ip_kset->list_lock);
1247	}
1248	spin_unlock(&ip_kset->list_lock);
1249	kobject_put(&ip_die_entry->ip_kset.kobj);
1250}
1251
1252static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1253{
1254	struct list_head *el, *tmp;
1255	struct kset *die_kset;
1256
1257	die_kset = &adev->ip_top->die_kset;
1258	spin_lock(&die_kset->list_lock);
1259	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1260		list_del_init(el);
1261		spin_unlock(&die_kset->list_lock);
1262		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1263		spin_lock(&die_kset->list_lock);
1264	}
1265	spin_unlock(&die_kset->list_lock);
1266	kobject_put(&adev->ip_top->die_kset.kobj);
1267	kobject_put(&adev->ip_top->kobj);
1268}
1269
1270/* ================================================== */
1271
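/* Parse the discovery binary once at init time: count VCN/SDMA/VPE/UMC
 * instances, build the GC/SDMA/VCN/JPEG instance masks, and record
 * per-instance register bases in adev->reg_offset[] and IP versions in
 * adev->ip_versions[].
 */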
1272static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1273{
1274	uint8_t num_base_address, subrev, variant;
1275	struct binary_header *bhdr;
1276	struct ip_discovery_header *ihdr;
1277	struct die_header *dhdr;
1278	struct ip_v4 *ip;
1279	uint16_t die_offset;
1280	uint16_t ip_offset;
1281	uint16_t num_dies;
1282	uint16_t num_ips;
1283	int hw_ip;
1284	int i, j, k;
1285	int r;
1286
1287	r = amdgpu_discovery_init(adev);
1288	if (r) {
1289		DRM_ERROR("amdgpu_discovery_init failed\n");
1290		return r;
1291	}
1292
1293	adev->gfx.xcc_mask = 0;
1294	adev->sdma.sdma_mask = 0;
1295	adev->vcn.inst_mask = 0;
1296	adev->jpeg.inst_mask = 0;
1297	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1298	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1299			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1300	num_dies = le16_to_cpu(ihdr->num_dies);
1301
1302	DRM_DEBUG("number of dies: %d\n", num_dies);
1303
1304	for (i = 0; i < num_dies; i++) {
1305		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1306		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1307		num_ips = le16_to_cpu(dhdr->num_ips);
1308		ip_offset = die_offset + sizeof(*dhdr);
1309
1310		if (le16_to_cpu(dhdr->die_id) != i) {
1311			DRM_ERROR("invalid die id %d, expected %d\n",
1312					le16_to_cpu(dhdr->die_id), i);
1313			return -EINVAL;
1314		}
1315
1316		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1317				le16_to_cpu(dhdr->die_id), num_ips);
1318
1319		for (j = 0; j < num_ips; j++) {
1320			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1321
1322			if (amdgpu_discovery_validate_ip(ip))
1323				goto next_ip;
1324
1325			num_base_address = ip->num_base_address;
1326
1327			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1328				  hw_id_names[le16_to_cpu(ip->hw_id)],
1329				  le16_to_cpu(ip->hw_id),
1330				  ip->instance_number,
1331				  ip->major, ip->minor,
1332				  ip->revision);
1333
1334			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1335				/* Bit [5:0]: original revision value
1336				 * Bit [7:6]: en/decode capability:
1337				 *     0b00 : VCN functions normally
1338				 *     0b10 : encode is disabled
1339				 *     0b01 : decode is disabled
1340				 */
1341				if (adev->vcn.num_vcn_inst <
1342				    AMDGPU_MAX_VCN_INSTANCES) {
1343					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1344						ip->revision & 0xc0;
1345					adev->vcn.num_vcn_inst++;
1346					adev->vcn.inst_mask |=
1347						(1U << ip->instance_number);
1348					adev->jpeg.inst_mask |=
1349						(1U << ip->instance_number);
1350				} else {
1351					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1352						adev->vcn.num_vcn_inst + 1,
1353						AMDGPU_MAX_VCN_INSTANCES);
1354				}
1355				ip->revision &= ~0xc0;
1356			}
1357			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1358			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1359			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1360			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1361				if (adev->sdma.num_instances <
1362				    AMDGPU_MAX_SDMA_INSTANCES) {
1363					adev->sdma.num_instances++;
1364					adev->sdma.sdma_mask |=
1365						(1U << ip->instance_number);
1366				} else {
1367					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1368						adev->sdma.num_instances + 1,
1369						AMDGPU_MAX_SDMA_INSTANCES);
1370				}
1371			}
1372
1373			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1374				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1375					adev->vpe.num_instances++;
1376				else
1377					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1378						adev->vpe.num_instances + 1,
1379						AMDGPU_MAX_VPE_INSTANCES);
1380			}
1381
1382			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1383				adev->gmc.num_umc++;
1384				adev->umc.node_inst_num++;
1385			}
1386
1387			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1388				adev->gfx.xcc_mask |=
1389					(1U << ip->instance_number);
1390
1391			for (k = 0; k < num_base_address; k++) {
1392				/*
1393				 * convert the endianness of base addresses in place,
1394				 * so that we don't need to convert them when accessing adev->reg_offset.
1395				 */
1396				if (ihdr->base_addr_64_bit)
1397					/* Truncate the 64bit base address from ip discovery
1398					 * and only store lower 32bit ip base in reg_offset[].
1399				 * Bits > 32 follow an ASIC-specific format, thus just
1400				 * discard them and handle them within the ASIC-specific
1401				 * code.  This way reg_offset[] and related helpers can
1402				 * stay unchanged.
1403					 * The base address is in dwords, thus clear the
1404					 * highest 2 bits to store.
1405					 */
1406					ip->base_address[k] =
1407						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1408				else
1409					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1410				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1411			}
1412
1413			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1414				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1415				    hw_id_map[hw_ip] != 0) {
1416					DRM_DEBUG("set register base offset for %s\n",
1417							hw_id_names[le16_to_cpu(ip->hw_id)]);
1418					adev->reg_offset[hw_ip][ip->instance_number] =
1419						ip->base_address;
1420					/* Instance support is somewhat inconsistent.
1421	 * SDMA is a good example.  Sienna Cichlid has 4 total
1422					 * SDMA instances, each enumerated separately (HWIDs
1423					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1424					 * but they are enumerated as multiple instances of the
1425					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1426					 * example.  On most chips there are multiple instances
1427					 * with the same HWID.
1428					 */
1429
1430					if (ihdr->version < 3) {
1431						subrev = 0;
1432						variant = 0;
1433					} else {
1434						subrev = ip->sub_revision;
1435						variant = ip->variant;
1436					}
1437
1438					adev->ip_versions[hw_ip]
1439							 [ip->instance_number] =
1440						IP_VERSION_FULL(ip->major,
1441								ip->minor,
1442								ip->revision,
1443								variant,
1444								subrev);
1445				}
1446			}
1447
1448next_ip:
1449			if (ihdr->base_addr_64_bit)
1450				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1451			else
1452				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1453		}
1454	}
1455
1456	return 0;
1457}
1458
1459static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1460{
1461	int vcn_harvest_count = 0;
1462	int umc_harvest_count = 0;
1463
1464	/*
1465	 * The harvest table does not apply to Navi1x and older GPUs,
1466	 * so read the harvest bit from each IP data structure to
1467	 * derive the harvest configuration.
1468	 */
1469	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1470	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1471	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
1472		if ((adev->pdev->device == 0x731E &&
1473			(adev->pdev->revision == 0xC6 ||
1474			 adev->pdev->revision == 0xC7)) ||
1475			(adev->pdev->device == 0x7340 &&
1476			 adev->pdev->revision == 0xC9) ||
1477			(adev->pdev->device == 0x7360 &&
1478			 adev->pdev->revision == 0xC7))
1479			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1480				&vcn_harvest_count);
1481	} else {
1482		amdgpu_discovery_read_from_harvest_table(adev,
1483							 &vcn_harvest_count,
1484							 &umc_harvest_count);
1485	}
1486
1487	amdgpu_discovery_harvest_config_quirk(adev);
1488
1489	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1490		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1491		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1492	}
1493
1494	if (umc_harvest_count < adev->gmc.num_umc) {
1495		adev->gmc.num_umc -= umc_harvest_count;
1496	}
1497}
1498
1499union gc_info {
1500	struct gc_info_v1_0 v1;
1501	struct gc_info_v1_1 v1_1;
1502	struct gc_info_v1_2 v1_2;
1503	struct gc_info_v1_3 v1_3;
1504	struct gc_info_v2_0 v2;
1505	struct gc_info_v2_1 v2_1;
1506};
1507
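/* Populate adev->gfx.config and adev->gfx.cu_info from the GC info
 * table; higher minor versions add fields on top of the v1/v2 layouts.
 */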
1508static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1509{
1510	struct binary_header *bhdr;
1511	union gc_info *gc_info;
1512	u16 offset;
1513
1514	if (!adev->mman.discovery_bin) {
1515		DRM_ERROR("ip discovery uninitialized\n");
1516		return -EINVAL;
1517	}
1518
1519	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1520	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1521
1522	if (!offset)
1523		return 0;
1524
1525	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1526
1527	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1528	case 1:
1529		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1530		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1531						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1532		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1533		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1534		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1535		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1536		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1537		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1538		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1539		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1540		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1541		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1542		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1543		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1544		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1545			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1546		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1547		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1548			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1549			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1550			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1551		}
1552		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1553			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1554			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1555			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1556			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1557			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1558			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1559			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1560			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1561		}
1562		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
1563			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
1564			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
1565			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
1566			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
1567			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
1568			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
1569			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
1570			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
1571		}
1572		break;
1573	case 2:
1574		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1575		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1576		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1577		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1578		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1579		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1580		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1581		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1582		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1583		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1584		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1585		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1586		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1587		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1588		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1589			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1590		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1591		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1592			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1593			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1594			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1595			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1596			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1597			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1598			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1599		}
1600		break;
1601	default:
1602		dev_err(adev->dev,
1603			"Unhandled GC info table %d.%d\n",
1604			le16_to_cpu(gc_info->v1.header.version_major),
1605			le16_to_cpu(gc_info->v1.header.version_minor));
1606		return -EINVAL;
1607	}
1608	return 0;
1609}
1610
1611union mall_info {
1612	struct mall_info_v1_0 v1;
1613	struct mall_info_v2_0 v2;
1614};
1615
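/* Derive the total MALL size from the MALL info table: v1 sums a
 * per-UMC size over all UMC instances (doubled or halved according to
 * the m_s_present/m_half_use bitmasks), v2 reports the per-UMC size
 * directly.
 */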
1616static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1617{
1618	struct binary_header *bhdr;
1619	union mall_info *mall_info;
1620	u32 u, mall_size_per_umc, m_s_present, half_use;
1621	u64 mall_size;
1622	u16 offset;
1623
1624	if (!adev->mman.discovery_bin) {
1625		DRM_ERROR("ip discovery uninitialized\n");
1626		return -EINVAL;
1627	}
1628
1629	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1630	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1631
1632	if (!offset)
1633		return 0;
1634
1635	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1636
1637	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1638	case 1:
1639		mall_size = 0;
1640		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1641		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1642		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1643		for (u = 0; u < adev->gmc.num_umc; u++) {
1644			if (m_s_present & (1 << u))
1645				mall_size += mall_size_per_umc * 2;
1646			else if (half_use & (1 << u))
1647				mall_size += mall_size_per_umc / 2;
1648			else
1649				mall_size += mall_size_per_umc;
1650		}
1651		adev->gmc.mall_size = mall_size;
1652		adev->gmc.m_half_use = half_use;
1653		break;
1654	case 2:
1655		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1656		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1657		break;
1658	default:
1659		dev_err(adev->dev,
1660			"Unhandled MALL info table %d.%d\n",
1661			le16_to_cpu(mall_info->v1.header.version_major),
1662			le16_to_cpu(mall_info->v1.header.version_minor));
1663		return -EINVAL;
1664	}
1665	return 0;
1666}
1667
1668union vcn_info {
1669	struct vcn_info_v1_0 v1;
1670};
1671
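/* Read the per-instance VCN fuse data (codec disable bits) from the VCN
 * info table into adev->vcn.vcn_codec_disable_mask[].
 */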
1672static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1673{
1674	struct binary_header *bhdr;
1675	union vcn_info *vcn_info;
1676	u16 offset;
1677	int v;
1678
1679	if (!adev->mman.discovery_bin) {
1680		DRM_ERROR("ip discovery uninitialized\n");
1681		return -EINVAL;
1682	}
1683
1684	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
1685	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but
1686	 * that may change in the future with new GPUs, so keep this
1687	 * check for defensive purposes.
1688	 */
1689	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1690		dev_err(adev->dev, "invalid vcn instances\n");
1691		return -EINVAL;
1692	}
1693
1694	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1695	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1696
1697	if (!offset)
1698		return 0;
1699
1700	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1701
1702	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1703	case 1:
1704		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1705		 * so this won't overflow.
1706		 */
1707		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1708			adev->vcn.vcn_codec_disable_mask[v] =
1709				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1710		}
1711		break;
1712	default:
1713		dev_err(adev->dev,
1714			"Unhandled VCN info table %d.%d\n",
1715			le16_to_cpu(vcn_info->v1.header.version_major),
1716			le16_to_cpu(vcn_info->v1.header.version_minor));
1717		return -EINVAL;
1718	}
1719	return 0;
1720}
1721
1722union nps_info {
1723	struct nps_info_v1_0 v1;
1724};
1725
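/* Re-read the NPS info table directly from the discovery region in VRAM
 * (rather than the cached copy in adev->mman.discovery_bin) and verify
 * its checksum.
 */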
1726static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
1727					     union nps_info *nps_data)
1728{
1729	uint64_t vram_size, pos, offset;
1730	struct nps_info_header *nhdr;
1731	struct binary_header bhdr;
1732	uint16_t checksum;
1733
1734	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
1735	pos = vram_size - DISCOVERY_TMR_OFFSET;
1736	amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
1737
1738	offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
1739	checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
1740
1741	amdgpu_device_vram_access(adev, (pos + offset), nps_data,
1742				  sizeof(*nps_data), false);
1743
1744	nhdr = (struct nps_info_header *)(nps_data);
1745	if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
1746					      le32_to_cpu(nhdr->size_bytes),
1747					      checksum)) {
1748		dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
1749		return -EINVAL;
1750	}
1751
1752	return 0;
1753}
1754
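/* Report the NPS type and memory ranges described by the NPS info
 * table; with @refresh the table is re-read from VRAM.  The returned
 * @ranges array is allocated with kvcalloc() and must be freed by the
 * caller.
 */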
1755int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1756				  uint32_t *nps_type,
1757				  struct amdgpu_gmc_memrange **ranges,
1758				  int *range_cnt, bool refresh)
1759{
1760	struct amdgpu_gmc_memrange *mem_ranges;
1761	struct binary_header *bhdr;
1762	union nps_info *nps_info;
1763	union nps_info nps_data;
1764	u16 offset;
1765	int i, r;
1766
1767	if (!nps_type || !range_cnt || !ranges)
1768		return -EINVAL;
1769
1770	if (refresh) {
1771		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
1772		if (r)
1773			return r;
1774		nps_info = &nps_data;
1775	} else {
1776		if (!adev->mman.discovery_bin) {
1777			dev_err(adev->dev,
1778				"fetch mem range failed, ip discovery uninitialized\n");
1779			return -EINVAL;
1780		}
1781
1782		bhdr = (struct binary_header *)adev->mman.discovery_bin;
1783		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1784
1785		if (!offset)
1786			return -ENOENT;
1787
1788		/* If verification fails, return as if NPS table doesn't exist */
1789		if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1790			return -ENOENT;
1791
1792		nps_info =
1793			(union nps_info *)(adev->mman.discovery_bin + offset);
1794	}
1795
1796	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1797	case 1:
1798		mem_ranges = kvcalloc(nps_info->v1.count,
1799				      sizeof(*mem_ranges),
1800				      GFP_KERNEL);
1801		if (!mem_ranges)
1802			return -ENOMEM;
1803		*nps_type = nps_info->v1.nps_type;
1804		*range_cnt = nps_info->v1.count;
1805		for (i = 0; i < *range_cnt; i++) {
1806			mem_ranges[i].base_address =
1807				nps_info->v1.instance_info[i].base_address;
1808			mem_ranges[i].limit_address =
1809				nps_info->v1.instance_info[i].limit_address;
1810			mem_ranges[i].nid_mask = -1;
1811			mem_ranges[i].flags = 0;
1812		}
1813		*ranges = mem_ranges;
1814		break;
1815	default:
1816		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1817			le16_to_cpu(nps_info->v1.header.version_major),
1818			le16_to_cpu(nps_info->v1.header.version_minor));
1819		return -EINVAL;
1820	}
1821
1822	return 0;
1823}
1824
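/* The amdgpu_discovery_set_*_ip_blocks() helpers below map discovered
 * IP versions to the matching driver implementations via
 * amdgpu_device_ip_block_add().
 */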
1825static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1826{
1827	/* what IP to use for this? */
1828	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1829	case IP_VERSION(9, 0, 1):
1830	case IP_VERSION(9, 1, 0):
1831	case IP_VERSION(9, 2, 1):
1832	case IP_VERSION(9, 2, 2):
1833	case IP_VERSION(9, 3, 0):
1834	case IP_VERSION(9, 4, 0):
1835	case IP_VERSION(9, 4, 1):
1836	case IP_VERSION(9, 4, 2):
1837	case IP_VERSION(9, 4, 3):
1838	case IP_VERSION(9, 4, 4):
1839		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1840		break;
1841	case IP_VERSION(10, 1, 10):
1842	case IP_VERSION(10, 1, 1):
1843	case IP_VERSION(10, 1, 2):
1844	case IP_VERSION(10, 1, 3):
1845	case IP_VERSION(10, 1, 4):
1846	case IP_VERSION(10, 3, 0):
1847	case IP_VERSION(10, 3, 1):
1848	case IP_VERSION(10, 3, 2):
1849	case IP_VERSION(10, 3, 3):
1850	case IP_VERSION(10, 3, 4):
1851	case IP_VERSION(10, 3, 5):
1852	case IP_VERSION(10, 3, 6):
1853	case IP_VERSION(10, 3, 7):
1854		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1855		break;
1856	case IP_VERSION(11, 0, 0):
1857	case IP_VERSION(11, 0, 1):
1858	case IP_VERSION(11, 0, 2):
1859	case IP_VERSION(11, 0, 3):
1860	case IP_VERSION(11, 0, 4):
1861	case IP_VERSION(11, 5, 0):
1862	case IP_VERSION(11, 5, 1):
1863	case IP_VERSION(11, 5, 2):
1864		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1865		break;
1866	case IP_VERSION(12, 0, 0):
1867	case IP_VERSION(12, 0, 1):
1868		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1869		break;
1870	default:
1871		dev_err(adev->dev,
1872			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1873			amdgpu_ip_version(adev, GC_HWIP, 0));
1874		return -EINVAL;
1875	}
1876	return 0;
1877}
1878
1879static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1880{
1881	/* use GC or MMHUB IP version */
1882	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1883	case IP_VERSION(9, 0, 1):
1884	case IP_VERSION(9, 1, 0):
1885	case IP_VERSION(9, 2, 1):
1886	case IP_VERSION(9, 2, 2):
1887	case IP_VERSION(9, 3, 0):
1888	case IP_VERSION(9, 4, 0):
1889	case IP_VERSION(9, 4, 1):
1890	case IP_VERSION(9, 4, 2):
1891	case IP_VERSION(9, 4, 3):
1892	case IP_VERSION(9, 4, 4):
1893		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1894		break;
1895	case IP_VERSION(10, 1, 10):
1896	case IP_VERSION(10, 1, 1):
1897	case IP_VERSION(10, 1, 2):
1898	case IP_VERSION(10, 1, 3):
1899	case IP_VERSION(10, 1, 4):
1900	case IP_VERSION(10, 3, 0):
1901	case IP_VERSION(10, 3, 1):
1902	case IP_VERSION(10, 3, 2):
1903	case IP_VERSION(10, 3, 3):
1904	case IP_VERSION(10, 3, 4):
1905	case IP_VERSION(10, 3, 5):
1906	case IP_VERSION(10, 3, 6):
1907	case IP_VERSION(10, 3, 7):
1908		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1909		break;
1910	case IP_VERSION(11, 0, 0):
1911	case IP_VERSION(11, 0, 1):
1912	case IP_VERSION(11, 0, 2):
1913	case IP_VERSION(11, 0, 3):
1914	case IP_VERSION(11, 0, 4):
1915	case IP_VERSION(11, 5, 0):
1916	case IP_VERSION(11, 5, 1):
1917	case IP_VERSION(11, 5, 2):
1918		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1919		break;
1920	case IP_VERSION(12, 0, 0):
1921	case IP_VERSION(12, 0, 1):
1922		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1923		break;
1924	default:
1925		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1926			amdgpu_ip_version(adev, GC_HWIP, 0));
1927		return -EINVAL;
1928	}
1929	return 0;
1930}
1931
1932static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1933{
1934	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1935	case IP_VERSION(4, 0, 0):
1936	case IP_VERSION(4, 0, 1):
1937	case IP_VERSION(4, 1, 0):
1938	case IP_VERSION(4, 1, 1):
1939	case IP_VERSION(4, 3, 0):
1940		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1941		break;
1942	case IP_VERSION(4, 2, 0):
1943	case IP_VERSION(4, 2, 1):
1944	case IP_VERSION(4, 4, 0):
1945	case IP_VERSION(4, 4, 2):
1946	case IP_VERSION(4, 4, 5):
1947		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1948		break;
1949	case IP_VERSION(5, 0, 0):
1950	case IP_VERSION(5, 0, 1):
1951	case IP_VERSION(5, 0, 2):
1952	case IP_VERSION(5, 0, 3):
1953	case IP_VERSION(5, 2, 0):
1954	case IP_VERSION(5, 2, 1):
1955		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1956		break;
1957	case IP_VERSION(6, 0, 0):
1958	case IP_VERSION(6, 0, 1):
1959	case IP_VERSION(6, 0, 2):
1960		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1961		break;
1962	case IP_VERSION(6, 1, 0):
1963		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1964		break;
1965	case IP_VERSION(7, 0, 0):
1966		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1967		break;
1968	default:
1969		dev_err(adev->dev,
1970			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1971			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1972		return -EINVAL;
1973	}
1974	return 0;
1975}
1976
1977static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1978{
1979	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1980	case IP_VERSION(9, 0, 0):
1981		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1982		break;
1983	case IP_VERSION(10, 0, 0):
1984	case IP_VERSION(10, 0, 1):
1985		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1986		break;
1987	case IP_VERSION(11, 0, 0):
1988	case IP_VERSION(11, 0, 2):
1989	case IP_VERSION(11, 0, 4):
1990	case IP_VERSION(11, 0, 5):
1991	case IP_VERSION(11, 0, 9):
1992	case IP_VERSION(11, 0, 7):
1993	case IP_VERSION(11, 0, 11):
1994	case IP_VERSION(11, 0, 12):
1995	case IP_VERSION(11, 0, 13):
1996	case IP_VERSION(11, 5, 0):
1997		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1998		break;
1999	case IP_VERSION(11, 0, 8):
2000		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
2001		break;
2002	case IP_VERSION(11, 0, 3):
2003	case IP_VERSION(12, 0, 1):
2004		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
2005		break;
2006	case IP_VERSION(13, 0, 0):
2007	case IP_VERSION(13, 0, 1):
2008	case IP_VERSION(13, 0, 2):
2009	case IP_VERSION(13, 0, 3):
2010	case IP_VERSION(13, 0, 5):
2011	case IP_VERSION(13, 0, 6):
2012	case IP_VERSION(13, 0, 7):
2013	case IP_VERSION(13, 0, 8):
2014	case IP_VERSION(13, 0, 10):
2015	case IP_VERSION(13, 0, 11):
2016	case IP_VERSION(13, 0, 14):
2017	case IP_VERSION(14, 0, 0):
2018	case IP_VERSION(14, 0, 1):
2019	case IP_VERSION(14, 0, 4):
2020		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2021		break;
2022	case IP_VERSION(13, 0, 4):
2023		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2024		break;
2025	case IP_VERSION(14, 0, 2):
2026	case IP_VERSION(14, 0, 3):
2027		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2028		break;
2029	default:
2030		dev_err(adev->dev,
2031			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2032			amdgpu_ip_version(adev, MP0_HWIP, 0));
2033		return -EINVAL;
2034	}
2035	return 0;
2036}
2037
2038static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2039{
2040	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2041	case IP_VERSION(9, 0, 0):
2042	case IP_VERSION(10, 0, 0):
2043	case IP_VERSION(10, 0, 1):
2044	case IP_VERSION(11, 0, 2):
2045		if (adev->asic_type == CHIP_ARCTURUS)
2046			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2047		else
2048			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2049		break;
2050	case IP_VERSION(11, 0, 0):
2051	case IP_VERSION(11, 0, 5):
2052	case IP_VERSION(11, 0, 9):
2053	case IP_VERSION(11, 0, 7):
2054	case IP_VERSION(11, 0, 8):
2055	case IP_VERSION(11, 0, 11):
2056	case IP_VERSION(11, 0, 12):
2057	case IP_VERSION(11, 0, 13):
2058	case IP_VERSION(11, 5, 0):
2059		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2060		break;
2061	case IP_VERSION(12, 0, 0):
2062	case IP_VERSION(12, 0, 1):
2063		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2064		break;
2065	case IP_VERSION(13, 0, 0):
2066	case IP_VERSION(13, 0, 1):
2067	case IP_VERSION(13, 0, 2):
2068	case IP_VERSION(13, 0, 3):
2069	case IP_VERSION(13, 0, 4):
2070	case IP_VERSION(13, 0, 5):
2071	case IP_VERSION(13, 0, 6):
2072	case IP_VERSION(13, 0, 7):
2073	case IP_VERSION(13, 0, 8):
2074	case IP_VERSION(13, 0, 10):
2075	case IP_VERSION(13, 0, 11):
2076	case IP_VERSION(13, 0, 14):
2077		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2078		break;
2079	case IP_VERSION(14, 0, 0):
2080	case IP_VERSION(14, 0, 1):
2081	case IP_VERSION(14, 0, 2):
2082	case IP_VERSION(14, 0, 3):
2083	case IP_VERSION(14, 0, 4):
2084		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2085		break;
2086	default:
2087		dev_err(adev->dev,
2088			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2089			amdgpu_ip_version(adev, MP1_HWIP, 0));
2090		return -EINVAL;
2091	}
2092	return 0;
2093}
2094
2095#if defined(CONFIG_DRM_AMD_DC)
2096static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2097{
2098	amdgpu_device_set_sriov_virtual_display(adev);
2099	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2100}
2101#endif
2102
2103static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2104{
2105	if (adev->enable_virtual_display) {
2106		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2107		return 0;
2108	}
2109
2110	if (!amdgpu_device_has_dc_support(adev))
2111		return 0;
2112
2113#if defined(CONFIG_DRM_AMD_DC)
2114	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2115		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2116		case IP_VERSION(1, 0, 0):
2117		case IP_VERSION(1, 0, 1):
2118		case IP_VERSION(2, 0, 2):
2119		case IP_VERSION(2, 0, 0):
2120		case IP_VERSION(2, 0, 3):
2121		case IP_VERSION(2, 1, 0):
2122		case IP_VERSION(3, 0, 0):
2123		case IP_VERSION(3, 0, 2):
2124		case IP_VERSION(3, 0, 3):
2125		case IP_VERSION(3, 0, 1):
2126		case IP_VERSION(3, 1, 2):
2127		case IP_VERSION(3, 1, 3):
2128		case IP_VERSION(3, 1, 4):
2129		case IP_VERSION(3, 1, 5):
2130		case IP_VERSION(3, 1, 6):
2131		case IP_VERSION(3, 2, 0):
2132		case IP_VERSION(3, 2, 1):
2133		case IP_VERSION(3, 5, 0):
2134		case IP_VERSION(3, 5, 1):
2135		case IP_VERSION(4, 1, 0):
2136			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2137			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2138				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2139
2140			if (amdgpu_sriov_vf(adev))
2141				amdgpu_discovery_set_sriov_display(adev);
2142			else
2143				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2144			break;
2145		default:
2146			dev_err(adev->dev,
2147				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2148				amdgpu_ip_version(adev, DCE_HWIP, 0));
2149			return -EINVAL;
2150		}
2151	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2152		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2153		case IP_VERSION(12, 0, 0):
2154		case IP_VERSION(12, 0, 1):
2155		case IP_VERSION(12, 1, 0):
2156			if (amdgpu_sriov_vf(adev))
2157				amdgpu_discovery_set_sriov_display(adev);
2158			else
2159				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2160			break;
2161		default:
2162			dev_err(adev->dev,
2163				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2164				amdgpu_ip_version(adev, DCI_HWIP, 0));
2165			return -EINVAL;
2166		}
2167	}
2168#endif
2169	return 0;
2170}
2171
2172static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2173{
2174	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2175	case IP_VERSION(9, 0, 1):
2176	case IP_VERSION(9, 1, 0):
2177	case IP_VERSION(9, 2, 1):
2178	case IP_VERSION(9, 2, 2):
2179	case IP_VERSION(9, 3, 0):
2180	case IP_VERSION(9, 4, 0):
2181	case IP_VERSION(9, 4, 1):
2182	case IP_VERSION(9, 4, 2):
2183		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2184		break;
2185	case IP_VERSION(9, 4, 3):
2186	case IP_VERSION(9, 4, 4):
2187		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2188		break;
2189	case IP_VERSION(10, 1, 10):
2190	case IP_VERSION(10, 1, 2):
2191	case IP_VERSION(10, 1, 1):
2192	case IP_VERSION(10, 1, 3):
2193	case IP_VERSION(10, 1, 4):
2194	case IP_VERSION(10, 3, 0):
2195	case IP_VERSION(10, 3, 2):
2196	case IP_VERSION(10, 3, 1):
2197	case IP_VERSION(10, 3, 4):
2198	case IP_VERSION(10, 3, 5):
2199	case IP_VERSION(10, 3, 6):
2200	case IP_VERSION(10, 3, 3):
2201	case IP_VERSION(10, 3, 7):
2202		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2203		break;
2204	case IP_VERSION(11, 0, 0):
2205	case IP_VERSION(11, 0, 1):
2206	case IP_VERSION(11, 0, 2):
2207	case IP_VERSION(11, 0, 3):
2208	case IP_VERSION(11, 0, 4):
2209	case IP_VERSION(11, 5, 0):
2210	case IP_VERSION(11, 5, 1):
2211	case IP_VERSION(11, 5, 2):
2212		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2213		break;
2214	case IP_VERSION(12, 0, 0):
2215	case IP_VERSION(12, 0, 1):
2216		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2217		break;
2218	default:
2219		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2220			amdgpu_ip_version(adev, GC_HWIP, 0));
2221		return -EINVAL;
2222	}
2223	return 0;
2224}
2225
2226static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2227{
2228	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2229	case IP_VERSION(4, 0, 0):
2230	case IP_VERSION(4, 0, 1):
2231	case IP_VERSION(4, 1, 0):
2232	case IP_VERSION(4, 1, 1):
2233	case IP_VERSION(4, 1, 2):
2234	case IP_VERSION(4, 2, 0):
2235	case IP_VERSION(4, 2, 2):
2236	case IP_VERSION(4, 4, 0):
2237		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2238		break;
2239	case IP_VERSION(4, 4, 2):
2240	case IP_VERSION(4, 4, 5):
2241		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2242		break;
2243	case IP_VERSION(5, 0, 0):
2244	case IP_VERSION(5, 0, 1):
2245	case IP_VERSION(5, 0, 2):
2246	case IP_VERSION(5, 0, 5):
2247		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2248		break;
2249	case IP_VERSION(5, 2, 0):
2250	case IP_VERSION(5, 2, 2):
2251	case IP_VERSION(5, 2, 4):
2252	case IP_VERSION(5, 2, 5):
2253	case IP_VERSION(5, 2, 6):
2254	case IP_VERSION(5, 2, 3):
2255	case IP_VERSION(5, 2, 1):
2256	case IP_VERSION(5, 2, 7):
2257		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2258		break;
2259	case IP_VERSION(6, 0, 0):
2260	case IP_VERSION(6, 0, 1):
2261	case IP_VERSION(6, 0, 2):
2262	case IP_VERSION(6, 0, 3):
2263	case IP_VERSION(6, 1, 0):
2264	case IP_VERSION(6, 1, 1):
2265	case IP_VERSION(6, 1, 2):
2266		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2267		break;
2268	case IP_VERSION(7, 0, 0):
2269	case IP_VERSION(7, 0, 1):
2270		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2271		break;
2272	default:
2273		dev_err(adev->dev,
2274			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2275			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2276		return -EINVAL;
2277	}
2278	return 0;
2279}
2280
2281static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2282{
2283	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2284		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2285		case IP_VERSION(7, 0, 0):
2286		case IP_VERSION(7, 2, 0):
2287			/* UVD is not supported on vega20 SR-IOV */
2288			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2289				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2290			break;
2291		default:
2292			dev_err(adev->dev,
2293				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2294				amdgpu_ip_version(adev, UVD_HWIP, 0));
2295			return -EINVAL;
2296		}
2297		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2298		case IP_VERSION(4, 0, 0):
2299		case IP_VERSION(4, 1, 0):
2300			/* VCE is not supported on vega20 SR-IOV */
2301			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2302				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2303			break;
2304		default:
2305			dev_err(adev->dev,
2306				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2307				amdgpu_ip_version(adev, VCE_HWIP, 0));
2308			return -EINVAL;
2309		}
2310	} else {
2311		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2312		case IP_VERSION(1, 0, 0):
2313		case IP_VERSION(1, 0, 1):
2314			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2315			break;
2316		case IP_VERSION(2, 0, 0):
2317		case IP_VERSION(2, 0, 2):
2318		case IP_VERSION(2, 2, 0):
2319			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2320			if (!amdgpu_sriov_vf(adev))
2321				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2322			break;
2323		case IP_VERSION(2, 0, 3):
2324			break;
2325		case IP_VERSION(2, 5, 0):
2326			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2327			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2328			break;
2329		case IP_VERSION(2, 6, 0):
2330			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2331			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2332			break;
2333		case IP_VERSION(3, 0, 0):
2334		case IP_VERSION(3, 0, 16):
2335		case IP_VERSION(3, 1, 1):
2336		case IP_VERSION(3, 1, 2):
2337		case IP_VERSION(3, 0, 2):
2338			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2339			if (!amdgpu_sriov_vf(adev))
2340				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2341			break;
2342		case IP_VERSION(3, 0, 33):
2343			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2344			break;
2345		case IP_VERSION(4, 0, 0):
2346		case IP_VERSION(4, 0, 2):
2347		case IP_VERSION(4, 0, 4):
2348			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2349			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2350			break;
2351		case IP_VERSION(4, 0, 3):
2352			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2353			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2354			break;
2355		case IP_VERSION(4, 0, 5):
2356		case IP_VERSION(4, 0, 6):
2357			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2358			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2359			break;
2360		case IP_VERSION(5, 0, 0):
2361			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2362			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2363			break;
2364		default:
2365			dev_err(adev->dev,
2366				"Failed to add VCN/JPEG IP block (UVD_HWIP:0x%x)\n",
2367				amdgpu_ip_version(adev, UVD_HWIP, 0));
2368			return -EINVAL;
2369		}
2370	}
2371	return 0;
2372}
2373
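/*
 * MES (micro engine scheduler): added for GC 11.x and 12.x only; adding
 * the block also sets the enable_mes/enable_mes_kiq (and, for GC 12,
 * optionally enable_uni_mes) flags that the rest of the driver keys off.
 */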
2374static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2375{
2376	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2377	case IP_VERSION(11, 0, 0):
2378	case IP_VERSION(11, 0, 1):
2379	case IP_VERSION(11, 0, 2):
2380	case IP_VERSION(11, 0, 3):
2381	case IP_VERSION(11, 0, 4):
2382	case IP_VERSION(11, 5, 0):
2383	case IP_VERSION(11, 5, 1):
2384	case IP_VERSION(11, 5, 2):
2385		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2386		adev->enable_mes = true;
2387		adev->enable_mes_kiq = true;
2388		break;
2389	case IP_VERSION(12, 0, 0):
2390	case IP_VERSION(12, 0, 1):
2391		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2392		adev->enable_mes = true;
2393		adev->enable_mes_kiq = true;
2394		if (amdgpu_uni_mes)
2395			adev->enable_uni_mes = true;
2396		break;
2397	default:
2398		break;
2399	}
2400	return 0;
2401}
2402
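/* SoC-level configuration that only the aqua_vanjaram parts (GC 9.4.3/9.4.4) need. */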
2403static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2404{
2405	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2406	case IP_VERSION(9, 4, 3):
2407	case IP_VERSION(9, 4, 4):
2408		aqua_vanjaram_init_soc_config(adev);
2409		break;
2410	default:
2411		break;
2412	}
2413}
2414
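/* VPE (video processing engine) is only present here as VPE 6.1.x. */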
2415static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2416{
2417	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2418	case IP_VERSION(6, 1, 0):
2419	case IP_VERSION(6, 1, 1):
2420	case IP_VERSION(6, 1, 3):
2421		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2422		break;
2423	default:
2424		break;
2425	}
2426
2427	return 0;
2428}
2429
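/*
 * UMSCH (user-mode scheduler for multimedia): opt-in via bit 0 of the
 * amdgpu_umsch_mm module parameter and only wired up for VCN 4.0.5/4.0.6.
 */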
2430static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2431{
2432	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2433	case IP_VERSION(4, 0, 5):
2434	case IP_VERSION(4, 0, 6):
2435		if (amdgpu_umsch_mm & 0x1) {
2436			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2437			adev->enable_umsch_mm = true;
2438		}
2439		break;
2440	default:
2441		break;
2442	}
2443
2444	return 0;
2445}
2446
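/* ISP (image signal processor) support is compiled in only with CONFIG_DRM_AMD_ISP. */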
2447static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2448{
2449#if defined(CONFIG_DRM_AMD_ISP)
2450	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2451	case IP_VERSION(4, 1, 0):
2452		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2453		break;
2454	case IP_VERSION(4, 1, 1):
2455		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2456		break;
2457	default:
2458		break;
2459	}
2460#endif
2461
2462	return 0;
2463}
2464
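/*
 * Top-level entry point for IP block setup.  Pre-discovery ASICs (Vega10
 * through Aldebaran) get their IP versions and instance counts hardcoded
 * below; for everything else the versions come from the IP discovery table
 * (reg base init, harvesting, GFX/MALL/VCN info).  The chip family, the
 * per-IP callbacks (NBIO/HDP/DF/SMUIO/LSDMA) and the individual IP blocks
 * are then selected from those versions.  IP_VERSION() packs
 * major/minor/revision into a single integer so versions can be compared
 * directly in switch statements.
 */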
2465int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2466{
2467	int r;
2468
2469	switch (adev->asic_type) {
2470	case CHIP_VEGA10:
2471		vega10_reg_base_init(adev);
2472		adev->sdma.num_instances = 2;
2473		adev->gmc.num_umc = 4;
2474		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2475		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2476		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2477		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2478		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2479		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2480		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2481		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2482		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2483		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2484		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2485		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2486		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2487		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2488		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2489		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2490		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2491		break;
2492	case CHIP_VEGA12:
2493		vega10_reg_base_init(adev);
2494		adev->sdma.num_instances = 2;
2495		adev->gmc.num_umc = 4;
2496		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2497		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2498		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2499		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2500		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2501		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2502		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2503		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2504		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2505		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2506		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2507		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2508		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2509		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2510		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2511		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2512		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2513		break;
2514	case CHIP_RAVEN:
2515		vega10_reg_base_init(adev);
2516		adev->sdma.num_instances = 1;
2517		adev->vcn.num_vcn_inst = 1;
2518		adev->gmc.num_umc = 2;
2519		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2520			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2521			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2522			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2523			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2524			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2525			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2526			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2527			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2528			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2529			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2530			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2531			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2532			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2533			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2534			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2535			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2536		} else {
2537			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2538			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2539			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2540			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2541			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2542			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2543			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2544			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2545			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2546			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2547			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2548			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2549			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2550			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2551			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2552			adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
2553		}
2554		break;
2555	case CHIP_VEGA20:
2556		vega20_reg_base_init(adev);
2557		adev->sdma.num_instances = 2;
2558		adev->gmc.num_umc = 8;
2559		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2560		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2561		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2562		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2563		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2564		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2565		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2566		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2567		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2568		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2569		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2570		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2571		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2572		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2573		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2574		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2575		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2576		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2577		break;
2578	case CHIP_ARCTURUS:
2579		arct_reg_base_init(adev);
2580		adev->sdma.num_instances = 8;
2581		adev->vcn.num_vcn_inst = 2;
2582		adev->gmc.num_umc = 8;
2583		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2584		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2585		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2586		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2587		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2588		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2589		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2590		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2591		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2592		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2593		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2594		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2595		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2596		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2597		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2598		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2599		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2600		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2601		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2602		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2603		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2604		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2605		break;
2606	case CHIP_ALDEBARAN:
2607		aldebaran_reg_base_init(adev);
2608		adev->sdma.num_instances = 5;
2609		adev->vcn.num_vcn_inst = 2;
2610		adev->gmc.num_umc = 4;
2611		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2612		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2613		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2614		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2615		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2616		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2617		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2618		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2619		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2620		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2621		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2622		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2623		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2624		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2625		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2626		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2627		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2628		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2629		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2630		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2631		break;
2632	default:
2633		r = amdgpu_discovery_reg_base_init(adev);
2634		if (r)
2635			return -EINVAL;
2636
2637		amdgpu_discovery_harvest_ip(adev);
2638		amdgpu_discovery_get_gfx_info(adev);
2639		amdgpu_discovery_get_mall_info(adev);
2640		amdgpu_discovery_get_vcn_info(adev);
2641		break;
2642	}
2643
2644	amdgpu_discovery_init_soc_config(adev);
2645	amdgpu_discovery_sysfs_init(adev);
2646
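	/* derive the chip family from the GC IP version */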
2647	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2648	case IP_VERSION(9, 0, 1):
2649	case IP_VERSION(9, 2, 1):
2650	case IP_VERSION(9, 4, 0):
2651	case IP_VERSION(9, 4, 1):
2652	case IP_VERSION(9, 4, 2):
2653	case IP_VERSION(9, 4, 3):
2654	case IP_VERSION(9, 4, 4):
2655		adev->family = AMDGPU_FAMILY_AI;
2656		break;
2657	case IP_VERSION(9, 1, 0):
2658	case IP_VERSION(9, 2, 2):
2659	case IP_VERSION(9, 3, 0):
2660		adev->family = AMDGPU_FAMILY_RV;
2661		break;
2662	case IP_VERSION(10, 1, 10):
2663	case IP_VERSION(10, 1, 1):
2664	case IP_VERSION(10, 1, 2):
2665	case IP_VERSION(10, 1, 3):
2666	case IP_VERSION(10, 1, 4):
2667	case IP_VERSION(10, 3, 0):
2668	case IP_VERSION(10, 3, 2):
2669	case IP_VERSION(10, 3, 4):
2670	case IP_VERSION(10, 3, 5):
2671		adev->family = AMDGPU_FAMILY_NV;
2672		break;
2673	case IP_VERSION(10, 3, 1):
2674		adev->family = AMDGPU_FAMILY_VGH;
2675		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2676		break;
2677	case IP_VERSION(10, 3, 3):
2678		adev->family = AMDGPU_FAMILY_YC;
2679		break;
2680	case IP_VERSION(10, 3, 6):
2681		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2682		break;
2683	case IP_VERSION(10, 3, 7):
2684		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2685		break;
2686	case IP_VERSION(11, 0, 0):
2687	case IP_VERSION(11, 0, 2):
2688	case IP_VERSION(11, 0, 3):
2689		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2690		break;
2691	case IP_VERSION(11, 0, 1):
2692	case IP_VERSION(11, 0, 4):
2693		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2694		break;
2695	case IP_VERSION(11, 5, 0):
2696	case IP_VERSION(11, 5, 1):
2697	case IP_VERSION(11, 5, 2):
2698		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2699		break;
2700	case IP_VERSION(12, 0, 0):
2701	case IP_VERSION(12, 0, 1):
2702		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2703		break;
2704	default:
2705		return -EINVAL;
2706	}
2707
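	/* GC versions that belong to APUs */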
2708	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2709	case IP_VERSION(9, 1, 0):
2710	case IP_VERSION(9, 2, 2):
2711	case IP_VERSION(9, 3, 0):
2712	case IP_VERSION(10, 1, 3):
2713	case IP_VERSION(10, 1, 4):
2714	case IP_VERSION(10, 3, 1):
2715	case IP_VERSION(10, 3, 3):
2716	case IP_VERSION(10, 3, 6):
2717	case IP_VERSION(10, 3, 7):
2718	case IP_VERSION(11, 0, 1):
2719	case IP_VERSION(11, 0, 4):
2720	case IP_VERSION(11, 5, 0):
2721	case IP_VERSION(11, 5, 1):
2722	case IP_VERSION(11, 5, 2):
2723		adev->flags |= AMD_IS_APU;
2724		break;
2725	default:
2726		break;
2727	}
2728
2729	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2730		adev->gmc.xgmi.supported = true;
2731
2732	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2733	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2734		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2735
2736	/* set NBIO version */
2737	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2738	case IP_VERSION(6, 1, 0):
2739	case IP_VERSION(6, 2, 0):
2740		adev->nbio.funcs = &nbio_v6_1_funcs;
2741		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2742		break;
2743	case IP_VERSION(7, 0, 0):
2744	case IP_VERSION(7, 0, 1):
2745	case IP_VERSION(2, 5, 0):
2746		adev->nbio.funcs = &nbio_v7_0_funcs;
2747		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2748		break;
2749	case IP_VERSION(7, 4, 0):
2750	case IP_VERSION(7, 4, 1):
2751	case IP_VERSION(7, 4, 4):
2752		adev->nbio.funcs = &nbio_v7_4_funcs;
2753		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2754		break;
2755	case IP_VERSION(7, 9, 0):
2756		adev->nbio.funcs = &nbio_v7_9_funcs;
2757		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2758		break;
2759	case IP_VERSION(7, 11, 0):
2760	case IP_VERSION(7, 11, 1):
2761	case IP_VERSION(7, 11, 3):
2762		adev->nbio.funcs = &nbio_v7_11_funcs;
2763		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2764		break;
2765	case IP_VERSION(7, 2, 0):
2766	case IP_VERSION(7, 2, 1):
2767	case IP_VERSION(7, 3, 0):
2768	case IP_VERSION(7, 5, 0):
2769	case IP_VERSION(7, 5, 1):
2770		adev->nbio.funcs = &nbio_v7_2_funcs;
2771		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2772		break;
2773	case IP_VERSION(2, 1, 1):
2774	case IP_VERSION(2, 3, 0):
2775	case IP_VERSION(2, 3, 1):
2776	case IP_VERSION(2, 3, 2):
2777	case IP_VERSION(3, 3, 0):
2778	case IP_VERSION(3, 3, 1):
2779	case IP_VERSION(3, 3, 2):
2780	case IP_VERSION(3, 3, 3):
2781		adev->nbio.funcs = &nbio_v2_3_funcs;
2782		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2783		break;
2784	case IP_VERSION(4, 3, 0):
2785	case IP_VERSION(4, 3, 1):
2786		if (amdgpu_sriov_vf(adev))
2787			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2788		else
2789			adev->nbio.funcs = &nbio_v4_3_funcs;
2790		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2791		break;
2792	case IP_VERSION(7, 7, 0):
2793	case IP_VERSION(7, 7, 1):
2794		adev->nbio.funcs = &nbio_v7_7_funcs;
2795		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2796		break;
2797	case IP_VERSION(6, 3, 1):
2798		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2799		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2800		break;
2801	default:
2802		break;
2803	}
2804
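	/* set HDP callbacks */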
2805	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2806	case IP_VERSION(4, 0, 0):
2807	case IP_VERSION(4, 0, 1):
2808	case IP_VERSION(4, 1, 0):
2809	case IP_VERSION(4, 1, 1):
2810	case IP_VERSION(4, 1, 2):
2811	case IP_VERSION(4, 2, 0):
2812	case IP_VERSION(4, 2, 1):
2813	case IP_VERSION(4, 4, 0):
2814	case IP_VERSION(4, 4, 2):
2815	case IP_VERSION(4, 4, 5):
2816		adev->hdp.funcs = &hdp_v4_0_funcs;
2817		break;
2818	case IP_VERSION(5, 0, 0):
2819	case IP_VERSION(5, 0, 1):
2820	case IP_VERSION(5, 0, 2):
2821	case IP_VERSION(5, 0, 3):
2822	case IP_VERSION(5, 0, 4):
2823	case IP_VERSION(5, 2, 0):
2824		adev->hdp.funcs = &hdp_v5_0_funcs;
2825		break;
2826	case IP_VERSION(5, 2, 1):
2827		adev->hdp.funcs = &hdp_v5_2_funcs;
2828		break;
2829	case IP_VERSION(6, 0, 0):
2830	case IP_VERSION(6, 0, 1):
2831	case IP_VERSION(6, 1, 0):
2832		adev->hdp.funcs = &hdp_v6_0_funcs;
2833		break;
2834	case IP_VERSION(7, 0, 0):
2835		adev->hdp.funcs = &hdp_v7_0_funcs;
2836		break;
2837	default:
2838		break;
2839	}
2840
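	/* set DF (data fabric) callbacks */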
2841	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2842	case IP_VERSION(3, 6, 0):
2843	case IP_VERSION(3, 6, 1):
2844	case IP_VERSION(3, 6, 2):
2845		adev->df.funcs = &df_v3_6_funcs;
2846		break;
2847	case IP_VERSION(2, 1, 0):
2848	case IP_VERSION(2, 1, 1):
2849	case IP_VERSION(2, 5, 0):
2850	case IP_VERSION(3, 5, 1):
2851	case IP_VERSION(3, 5, 2):
2852		adev->df.funcs = &df_v1_7_funcs;
2853		break;
2854	case IP_VERSION(4, 3, 0):
2855		adev->df.funcs = &df_v4_3_funcs;
2856		break;
2857	case IP_VERSION(4, 6, 2):
2858		adev->df.funcs = &df_v4_6_2_funcs;
2859		break;
2860	case IP_VERSION(4, 15, 0):
2861	case IP_VERSION(4, 15, 1):
2862		adev->df.funcs = &df_v4_15_funcs;
2863		break;
2864	default:
2865		break;
2866	}
2867
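	/* set SMUIO callbacks; SMUIO 13.0.3 also decides the APU flag from the package type */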
2868	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2869	case IP_VERSION(9, 0, 0):
2870	case IP_VERSION(9, 0, 1):
2871	case IP_VERSION(10, 0, 0):
2872	case IP_VERSION(10, 0, 1):
2873	case IP_VERSION(10, 0, 2):
2874		adev->smuio.funcs = &smuio_v9_0_funcs;
2875		break;
2876	case IP_VERSION(11, 0, 0):
2877	case IP_VERSION(11, 0, 2):
2878	case IP_VERSION(11, 0, 3):
2879	case IP_VERSION(11, 0, 4):
2880	case IP_VERSION(11, 0, 7):
2881	case IP_VERSION(11, 0, 8):
2882		adev->smuio.funcs = &smuio_v11_0_funcs;
2883		break;
2884	case IP_VERSION(11, 0, 6):
2885	case IP_VERSION(11, 0, 10):
2886	case IP_VERSION(11, 0, 11):
2887	case IP_VERSION(11, 5, 0):
2888	case IP_VERSION(13, 0, 1):
2889	case IP_VERSION(13, 0, 9):
2890	case IP_VERSION(13, 0, 10):
2891		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2892		break;
2893	case IP_VERSION(13, 0, 2):
2894		adev->smuio.funcs = &smuio_v13_0_funcs;
2895		break;
2896	case IP_VERSION(13, 0, 3):
2897		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2898		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2899			adev->flags |= AMD_IS_APU;
2900		}
2901		break;
2902	case IP_VERSION(13, 0, 6):
2903	case IP_VERSION(13, 0, 8):
2904	case IP_VERSION(14, 0, 0):
2905	case IP_VERSION(14, 0, 1):
2906		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2907		break;
2908	case IP_VERSION(14, 0, 2):
2909		adev->smuio.funcs = &smuio_v14_0_2_funcs;
2910		break;
2911	default:
2912		break;
2913	}
2914
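	/* set LSDMA callbacks */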
2915	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2916	case IP_VERSION(6, 0, 0):
2917	case IP_VERSION(6, 0, 1):
2918	case IP_VERSION(6, 0, 2):
2919	case IP_VERSION(6, 0, 3):
2920		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2921		break;
2922	case IP_VERSION(7, 0, 0):
2923	case IP_VERSION(7, 0, 1):
2924		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2925		break;
2926	default:
2927		break;
2928	}
2929
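	/*
	 * Add the IP blocks.  Ordering matters here: common and GMC come
	 * first; under SR-IOV, PSP must come before IH (see comment below);
	 * PSP/SMU placement otherwise depends on the firmware load type;
	 * display, GC, SDMA, multimedia, MES, VPE, UMSCH and ISP follow.
	 */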
2930	r = amdgpu_discovery_set_common_ip_blocks(adev);
2931	if (r)
2932		return r;
2933
2934	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2935	if (r)
2936		return r;
2937
2938	/* For SR-IOV, PSP needs to be initialized before IH */
2939	if (amdgpu_sriov_vf(adev)) {
2940		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2941		if (r)
2942			return r;
2943		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2944		if (r)
2945			return r;
2946	} else {
2947		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2948		if (r)
2949			return r;
2950
2951		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2952			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2953			if (r)
2954				return r;
2955		}
2956	}
2957
2958	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2959		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2960		if (r)
2961			return r;
2962	}
2963
2964	r = amdgpu_discovery_set_display_ip_blocks(adev);
2965	if (r)
2966		return r;
2967
2968	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2969	if (r)
2970		return r;
2971
2972	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2973	if (r)
2974		return r;
2975
2976	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2977	     !amdgpu_sriov_vf(adev)) ||
2978	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2979		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2980		if (r)
2981			return r;
2982	}
2983
2984	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2985	if (r)
2986		return r;
2987
2988	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2989	if (r)
2990		return r;
2991
2992	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2993	if (r)
2994		return r;
2995
2996	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2997	if (r)
2998		return r;
2999
3000	r = amdgpu_discovery_set_isp_ip_blocks(adev);
3001	if (r)
3002		return r;
3003	return 0;
3004}
3005