1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25
26#include "amdgpu.h"
27#include "amdgpu_discovery.h"
28#include "soc15_hw_ip.h"
29#include "discovery.h"
30
31#include "soc15.h"
32#include "gfx_v9_0.h"
33#include "gfx_v9_4_3.h"
34#include "gmc_v9_0.h"
35#include "df_v1_7.h"
36#include "df_v3_6.h"
37#include "df_v4_3.h"
38#include "df_v4_6_2.h"
39#include "nbio_v6_1.h"
40#include "nbio_v7_0.h"
41#include "nbio_v7_4.h"
42#include "nbio_v7_9.h"
43#include "nbio_v7_11.h"
44#include "hdp_v4_0.h"
45#include "vega10_ih.h"
46#include "vega20_ih.h"
47#include "sdma_v4_0.h"
48#include "sdma_v4_4_2.h"
49#include "uvd_v7_0.h"
50#include "vce_v4_0.h"
51#include "vcn_v1_0.h"
52#include "vcn_v2_5.h"
53#include "jpeg_v2_5.h"
54#include "smuio_v9_0.h"
55#include "gmc_v10_0.h"
56#include "gmc_v11_0.h"
57#include "gfxhub_v2_0.h"
58#include "mmhub_v2_0.h"
59#include "nbio_v2_3.h"
60#include "nbio_v4_3.h"
61#include "nbio_v7_2.h"
62#include "nbio_v7_7.h"
63#include "hdp_v5_0.h"
64#include "hdp_v5_2.h"
65#include "hdp_v6_0.h"
66#include "nv.h"
67#include "soc21.h"
68#include "navi10_ih.h"
69#include "ih_v6_0.h"
70#include "ih_v6_1.h"
71#include "gfx_v10_0.h"
72#include "gfx_v11_0.h"
73#include "sdma_v5_0.h"
74#include "sdma_v5_2.h"
75#include "sdma_v6_0.h"
76#include "lsdma_v6_0.h"
77#include "vcn_v2_0.h"
78#include "jpeg_v2_0.h"
79#include "vcn_v3_0.h"
80#include "jpeg_v3_0.h"
81#include "vcn_v4_0.h"
82#include "jpeg_v4_0.h"
83#include "vcn_v4_0_3.h"
84#include "jpeg_v4_0_3.h"
85#include "vcn_v4_0_5.h"
86#include "jpeg_v4_0_5.h"
87#include "amdgpu_vkms.h"
88#include "mes_v10_1.h"
89#include "mes_v11_0.h"
90#include "smuio_v11_0.h"
91#include "smuio_v11_0_6.h"
92#include "smuio_v13_0.h"
93#include "smuio_v13_0_3.h"
94#include "smuio_v13_0_6.h"
95
96#include "amdgpu_vpe.h"
97
98#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
99MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
100
101#define mmRCC_CONFIG_MEMSIZE 0xde3
102#define mmMP0_SMN_C2PMSG_33 0x16061
103#define mmMM_INDEX 0x0
104#define mmMM_INDEX_HI 0x6
105#define mmMM_DATA 0x1
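
/* These MMIO offsets (expressed in dwords) are the few registers needed
 * before any discovery data has been parsed: RCC_CONFIG_MEMSIZE reports the
 * VRAM size in MB and MP0_SMN_C2PMSG_33 flags IFWI initialization, both
 * used by amdgpu_discovery_read_binary_from_mem() below, while the
 * MM_INDEX/MM_INDEX_HI/MM_DATA trio forms the usual indirect register
 * access window.
 */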
106
107static const char *hw_id_names[HW_ID_MAX] = {
108 [MP1_HWID] = "MP1",
109 [MP2_HWID] = "MP2",
110 [THM_HWID] = "THM",
111 [SMUIO_HWID] = "SMUIO",
112 [FUSE_HWID] = "FUSE",
113 [CLKA_HWID] = "CLKA",
114 [PWR_HWID] = "PWR",
115 [GC_HWID] = "GC",
116 [UVD_HWID] = "UVD",
117 [AUDIO_AZ_HWID] = "AUDIO_AZ",
118 [ACP_HWID] = "ACP",
119 [DCI_HWID] = "DCI",
120 [DMU_HWID] = "DMU",
121 [DCO_HWID] = "DCO",
122 [DIO_HWID] = "DIO",
123 [XDMA_HWID] = "XDMA",
124 [DCEAZ_HWID] = "DCEAZ",
125 [DAZ_HWID] = "DAZ",
126 [SDPMUX_HWID] = "SDPMUX",
127 [NTB_HWID] = "NTB",
128 [IOHC_HWID] = "IOHC",
129 [L2IMU_HWID] = "L2IMU",
130 [VCE_HWID] = "VCE",
131 [MMHUB_HWID] = "MMHUB",
132 [ATHUB_HWID] = "ATHUB",
133 [DBGU_NBIO_HWID] = "DBGU_NBIO",
134 [DFX_HWID] = "DFX",
135 [DBGU0_HWID] = "DBGU0",
136 [DBGU1_HWID] = "DBGU1",
137 [OSSSYS_HWID] = "OSSSYS",
138 [HDP_HWID] = "HDP",
139 [SDMA0_HWID] = "SDMA0",
140 [SDMA1_HWID] = "SDMA1",
141 [SDMA2_HWID] = "SDMA2",
142 [SDMA3_HWID] = "SDMA3",
143 [LSDMA_HWID] = "LSDMA",
144 [ISP_HWID] = "ISP",
145 [DBGU_IO_HWID] = "DBGU_IO",
146 [DF_HWID] = "DF",
147 [CLKB_HWID] = "CLKB",
148 [FCH_HWID] = "FCH",
149 [DFX_DAP_HWID] = "DFX_DAP",
150 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
151 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
152 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
153 [L1IMU3_HWID] = "L1IMU3",
154 [L1IMU4_HWID] = "L1IMU4",
155 [L1IMU5_HWID] = "L1IMU5",
156 [L1IMU6_HWID] = "L1IMU6",
157 [L1IMU7_HWID] = "L1IMU7",
158 [L1IMU8_HWID] = "L1IMU8",
159 [L1IMU9_HWID] = "L1IMU9",
160 [L1IMU10_HWID] = "L1IMU10",
161 [L1IMU11_HWID] = "L1IMU11",
162 [L1IMU12_HWID] = "L1IMU12",
163 [L1IMU13_HWID] = "L1IMU13",
164 [L1IMU14_HWID] = "L1IMU14",
165 [L1IMU15_HWID] = "L1IMU15",
166 [WAFLC_HWID] = "WAFLC",
167 [FCH_USB_PD_HWID] = "FCH_USB_PD",
168 [PCIE_HWID] = "PCIE",
169 [PCS_HWID] = "PCS",
170 [DDCL_HWID] = "DDCL",
171 [SST_HWID] = "SST",
172 [IOAGR_HWID] = "IOAGR",
173 [NBIF_HWID] = "NBIF",
174 [IOAPIC_HWID] = "IOAPIC",
175 [SYSTEMHUB_HWID] = "SYSTEMHUB",
176 [NTBCCP_HWID] = "NTBCCP",
177 [UMC_HWID] = "UMC",
178 [SATA_HWID] = "SATA",
179 [USB_HWID] = "USB",
180 [CCXSEC_HWID] = "CCXSEC",
181 [XGMI_HWID] = "XGMI",
182 [XGBE_HWID] = "XGBE",
183 [MP0_HWID] = "MP0",
184 [VPE_HWID] = "VPE",
185};
186
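/* A HWID identifies a hardware block in the discovery binary, while a HWIP
 * index is the driver's own enumeration of IP blocks; hw_id_map translates
 * the latter to the former. Note, for example, that both NBIO_HWIP and
 * NBIF_HWIP resolve to NBIF_HWID.
 */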
187static int hw_id_map[MAX_HWIP] = {
188 [GC_HWIP] = GC_HWID,
189 [HDP_HWIP] = HDP_HWID,
190 [SDMA0_HWIP] = SDMA0_HWID,
191 [SDMA1_HWIP] = SDMA1_HWID,
192 [SDMA2_HWIP] = SDMA2_HWID,
193 [SDMA3_HWIP] = SDMA3_HWID,
194 [LSDMA_HWIP] = LSDMA_HWID,
195 [MMHUB_HWIP] = MMHUB_HWID,
196 [ATHUB_HWIP] = ATHUB_HWID,
197 [NBIO_HWIP] = NBIF_HWID,
198 [MP0_HWIP] = MP0_HWID,
199 [MP1_HWIP] = MP1_HWID,
200 [UVD_HWIP] = UVD_HWID,
201 [VCE_HWIP] = VCE_HWID,
202 [DF_HWIP] = DF_HWID,
203 [DCE_HWIP] = DMU_HWID,
204 [OSSSYS_HWIP] = OSSSYS_HWID,
205 [SMUIO_HWIP] = SMUIO_HWID,
206 [PWR_HWIP] = PWR_HWID,
207 [NBIF_HWIP] = NBIF_HWID,
208 [THM_HWIP] = THM_HWID,
209 [CLK_HWIP] = CLKA_HWID,
210 [UMC_HWIP] = UMC_HWID,
211 [XGMI_HWIP] = XGMI_HWID,
212 [DCI_HWIP] = DCI_HWID,
213 [PCIE_HWIP] = PCIE_HWID,
214 [VPE_HWIP] = VPE_HWID,
215};
216
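/* The discovery binary sits at a fixed distance from the *end* of the
 * region that carries it: DISCOVERY_TMR_OFFSET bytes back from the top of
 * the ACPI-reported TMR here, or from the top of VRAM in
 * amdgpu_discovery_read_binary_from_mem() below. Purely as an illustration,
 * a hypothetical TMR at 0x7F000000 with size 0x100000 would place the
 * binary at (0x7F000000 + 0x100000) - DISCOVERY_TMR_OFFSET.
 */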
217static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
218{
219 u64 tmr_offset, tmr_size, pos;
220 void *discv_regn;
221 int ret;
222
223 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
224 if (ret)
225 return ret;
226
227 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
228
229 /* This region is read-only and reserved from system use */
230 discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
231 if (discv_regn) {
232 memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
233 memunmap(discv_regn);
234 return 0;
235 }
236
237 return -ENOENT;
238}
239
240static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
241 uint8_t *binary)
242{
243 uint64_t vram_size;
244 u32 msg;
245 int i, ret = 0;
246
247 /* It can take up to a second for IFWI init to complete on some dGPUs,
248 * but generally it should be in the 60-100ms range. Normally this starts
249 * as soon as the device gets power so by the time the OS loads this has long
250 * completed. However, when a card is hotplugged via e.g., USB4, we need to
251 * wait for this to complete. Once the C2PMSG is updated, we can
252 * continue.
253 */
254 if (dev_is_removable(&adev->pdev->dev)) {
255 for (i = 0; i < 1000; i++) {
256 msg = RREG32(mmMP0_SMN_C2PMSG_33);
257 if (msg & 0x80000000)
258 break;
259 msleep(1);
260 }
261 }
262 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
263
264 if (vram_size) {
265 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
266 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
267 adev->mman.discovery_tmr_size, false);
268 } else {
269 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
270 }
271
272 return ret;
273}
274
275static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
276{
277 const struct firmware *fw;
278 const char *fw_name;
279 int r;
280
281 switch (amdgpu_discovery) {
282 case 2:
283 fw_name = FIRMWARE_IP_DISCOVERY;
284 break;
285 default:
286 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
287 return -EINVAL;
288 }
289
290 r = request_firmware(&fw, fw_name, adev->dev);
291 if (r) {
292 dev_err(adev->dev, "can't load firmware \"%s\"\n",
293 fw_name);
294 return r;
295 }
296
297 memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
298 release_firmware(fw);
299
300 return 0;
301}
302
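/* The discovery tables use a trivial checksum: an unsigned byte-wise sum of
 * the covered region truncated to 16 bits. As a small worked example, the
 * bytes { 0x01, 0x02, 0xFF } sum to 0x102, so the stored checksum would be
 * 0x0102.
 */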
303static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
304{
305 uint16_t checksum = 0;
306 int i;
307
308 for (i = 0; i < size; i++)
309 checksum += data[i];
310
311 return checksum;
312}
313
314static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
315 uint16_t expected)
316{
317 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
318}
319
320static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
321{
322 struct binary_header *bhdr;
323 bhdr = (struct binary_header *)binary;
324
325 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
326}
327
328static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
329{
330 /*
331 * So far, apply this quirk only on those Navy Flounder boards which
332 * have a bad harvest table of VCN config.
333 */
334 if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
335 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
336 switch (adev->pdev->revision) {
337 case 0xC1:
338 case 0xC2:
339 case 0xC3:
340 case 0xC5:
341 case 0xC7:
342 case 0xCF:
343 case 0xDF:
344 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
345 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
346 break;
347 default:
348 break;
349 }
350 }
351}
352
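/* Rough sketch of the layout validated below: the blob begins with a
 * binary_header whose table_list[] carries an { offset, checksum } pair for
 * each table (IP_DISCOVERY, GC, HARVEST_INFO, VCN_INFO, MALL_INFO). Every
 * table that is present starts with its own header holding a signature or
 * table_id plus a size, against which the stored checksum is verified.
 */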
353static int amdgpu_discovery_init(struct amdgpu_device *adev)
354{
355 struct table_info *info;
356 struct binary_header *bhdr;
357 uint16_t offset;
358 uint16_t size;
359 uint16_t checksum;
360 int r;
361
362 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
363 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
364 if (!adev->mman.discovery_bin)
365 return -ENOMEM;
366
367 /* Read from file if it is the preferred option */
368 if (amdgpu_discovery == 2) {
369 dev_info(adev->dev, "use ip discovery information from file");
370 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
371
372 if (r) {
373 dev_err(adev->dev, "failed to read ip discovery binary from file\n");
374 r = -EINVAL;
375 goto out;
376 }
377
378 } else {
379 r = amdgpu_discovery_read_binary_from_mem(
380 adev, adev->mman.discovery_bin);
381 if (r)
382 goto out;
383 }
384
385 /* check the ip discovery binary signature */
386 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
387 dev_err(adev->dev,
388 "get invalid ip discovery binary signature\n");
389 r = -EINVAL;
390 goto out;
391 }
392
393 bhdr = (struct binary_header *)adev->mman.discovery_bin;
394
395 offset = offsetof(struct binary_header, binary_checksum) +
396 sizeof(bhdr->binary_checksum);
397 size = le16_to_cpu(bhdr->binary_size) - offset;
398 checksum = le16_to_cpu(bhdr->binary_checksum);
399
400 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
401 size, checksum)) {
402 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
403 r = -EINVAL;
404 goto out;
405 }
406
407 info = &bhdr->table_list[IP_DISCOVERY];
408 offset = le16_to_cpu(info->offset);
409 checksum = le16_to_cpu(info->checksum);
410
411 if (offset) {
412 struct ip_discovery_header *ihdr =
413 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
414 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
415 dev_err(adev->dev, "invalid ip discovery data table signature\n");
416 r = -EINVAL;
417 goto out;
418 }
419
420 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
421 le16_to_cpu(ihdr->size), checksum)) {
422 dev_err(adev->dev, "invalid ip discovery data table checksum\n");
423 r = -EINVAL;
424 goto out;
425 }
426 }
427
428 info = &bhdr->table_list[GC];
429 offset = le16_to_cpu(info->offset);
430 checksum = le16_to_cpu(info->checksum);
431
432 if (offset) {
433 struct gpu_info_header *ghdr =
434 (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
435
436 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
437 dev_err(adev->dev, "invalid ip discovery gc table id\n");
438 r = -EINVAL;
439 goto out;
440 }
441
442 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
443 le32_to_cpu(ghdr->size), checksum)) {
444 dev_err(adev->dev, "invalid gc data table checksum\n");
445 r = -EINVAL;
446 goto out;
447 }
448 }
449
450 info = &bhdr->table_list[HARVEST_INFO];
451 offset = le16_to_cpu(info->offset);
452 checksum = le16_to_cpu(info->checksum);
453
454 if (offset) {
455 struct harvest_info_header *hhdr =
456 (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
457
458 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
459 dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
460 r = -EINVAL;
461 goto out;
462 }
463
464 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
465 sizeof(struct harvest_table), checksum)) {
466 dev_err(adev->dev, "invalid harvest data table checksum\n");
467 r = -EINVAL;
468 goto out;
469 }
470 }
471
472 info = &bhdr->table_list[VCN_INFO];
473 offset = le16_to_cpu(info->offset);
474 checksum = le16_to_cpu(info->checksum);
475
476 if (offset) {
477 struct vcn_info_header *vhdr =
478 (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
479
480 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
481 dev_err(adev->dev, "invalid ip discovery vcn table id\n");
482 r = -EINVAL;
483 goto out;
484 }
485
486 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
487 le32_to_cpu(vhdr->size_bytes), checksum)) {
488 dev_err(adev->dev, "invalid vcn data table checksum\n");
489 r = -EINVAL;
490 goto out;
491 }
492 }
493
494 info = &bhdr->table_list[MALL_INFO];
495 offset = le16_to_cpu(info->offset);
496 checksum = le16_to_cpu(info->checksum);
497
498 if (offset) {
499 struct mall_info_header *mhdr =
500 (struct mall_info_header *)(adev->mman.discovery_bin + offset);
501
502 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
503 dev_err(adev->dev, "invalid ip discovery mall table id\n");
504 r = -EINVAL;
505 goto out;
506 }
507
508 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
509 le32_to_cpu(mhdr->size_bytes), checksum)) {
510 dev_err(adev->dev, "invalid mall data table checksum\n");
511 r = -EINVAL;
512 goto out;
513 }
514 }
515
516 return 0;
517
518out:
519 kfree(adev->mman.discovery_bin);
520 adev->mman.discovery_bin = NULL;
521
522 return r;
523}
524
525static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
526
527void amdgpu_discovery_fini(struct amdgpu_device *adev)
528{
529 amdgpu_discovery_sysfs_fini(adev);
530 kfree(adev->mman.discovery_bin);
531 adev->mman.discovery_bin = NULL;
532}
533
534static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
535{
536 if (ip->instance_number >= HWIP_MAX_INSTANCE) {
537 DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
538 ip->instance_number);
539 return -EINVAL;
540 }
541 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
542 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
543 le16_to_cpu(ip->hw_id));
544 return -EINVAL;
545 }
546
547 return 0;
548}
549
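/* Both walkers below step through a die's IP list with a variable stride,
 * since each ip_v4 entry ends in a flexible array of base addresses. A
 * minimal sketch of the stride computation used throughout this file:
 *
 *   next = ihdr->base_addr_64_bit ?
 *          struct_size(ip, base_address_64, ip->num_base_address) :
 *          struct_size(ip, base_address, ip->num_base_address);
 */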
550static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
551 uint32_t *vcn_harvest_count)
552{
553 struct binary_header *bhdr;
554 struct ip_discovery_header *ihdr;
555 struct die_header *dhdr;
556 struct ip_v4 *ip;
557 uint16_t die_offset, ip_offset, num_dies, num_ips;
558 int i, j;
559
560 bhdr = (struct binary_header *)adev->mman.discovery_bin;
561 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
562 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
563 num_dies = le16_to_cpu(ihdr->num_dies);
564
565 /* scan harvest bit of all IP data structures */
566 for (i = 0; i < num_dies; i++) {
567 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
568 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
569 num_ips = le16_to_cpu(dhdr->num_ips);
570 ip_offset = die_offset + sizeof(*dhdr);
571
572 for (j = 0; j < num_ips; j++) {
573 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
574
575 if (amdgpu_discovery_validate_ip(ip))
576 goto next_ip;
577
578 if (le16_to_cpu(ip->variant) == 1) {
579 switch (le16_to_cpu(ip->hw_id)) {
580 case VCN_HWID:
581 (*vcn_harvest_count)++;
582 if (ip->instance_number == 0) {
583 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
584 adev->vcn.inst_mask &=
585 ~AMDGPU_VCN_HARVEST_VCN0;
586 adev->jpeg.inst_mask &=
587 ~AMDGPU_VCN_HARVEST_VCN0;
588 } else {
589 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
590 adev->vcn.inst_mask &=
591 ~AMDGPU_VCN_HARVEST_VCN1;
592 adev->jpeg.inst_mask &=
593 ~AMDGPU_VCN_HARVEST_VCN1;
594 }
595 break;
596 case DMU_HWID:
597 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
598 break;
599 default:
600 break;
601 }
602 }
603next_ip:
604 if (ihdr->base_addr_64_bit)
605 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
606 else
607 ip_offset += struct_size(ip, base_address, ip->num_base_address);
608 }
609 }
610}
611
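/* The harvest table is a flat list of at most 32 { hw_id, number_instance }
 * entries terminated by a zero hw_id, each naming one harvested instance.
 * For example, a VCN_HWID entry with number_instance == 1 sets bit 1 in
 * both vcn.harvest_config and jpeg.harvest_config below.
 */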
612static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
613 uint32_t *vcn_harvest_count,
614 uint32_t *umc_harvest_count)
615{
616 struct binary_header *bhdr;
617 struct harvest_table *harvest_info;
618 u16 offset;
619 int i;
620 uint32_t umc_harvest_config = 0;
621
622 bhdr = (struct binary_header *)adev->mman.discovery_bin;
623 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
624
625 if (!offset) {
626 dev_err(adev->dev, "invalid harvest table offset\n");
627 return;
628 }
629
630 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
631
632 for (i = 0; i < 32; i++) {
633 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
634 break;
635
636 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
637 case VCN_HWID:
638 (*vcn_harvest_count)++;
639 adev->vcn.harvest_config |=
640 (1 << harvest_info->list[i].number_instance);
641 adev->jpeg.harvest_config |=
642 (1 << harvest_info->list[i].number_instance);
643
644 adev->vcn.inst_mask &=
645 ~(1U << harvest_info->list[i].number_instance);
646 adev->jpeg.inst_mask &=
647 ~(1U << harvest_info->list[i].number_instance);
648 break;
649 case DMU_HWID:
650 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
651 break;
652 case UMC_HWID:
653 umc_harvest_config |=
654 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
655 (*umc_harvest_count)++;
656 break;
657 case GC_HWID:
658 adev->gfx.xcc_mask &=
659 ~(1U << harvest_info->list[i].number_instance);
660 break;
661 case SDMA0_HWID:
662 adev->sdma.sdma_mask &=
663 ~(1U << harvest_info->list[i].number_instance);
664 break;
665 default:
666 break;
667 }
668 }
669
670 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
671 ~umc_harvest_config;
672}
673
674/* ================================================== */
675
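/* The structures below back the ip_discovery sysfs tree. For a
 * hypothetical device with one die and a single GC instance it would look
 * like:
 *
 *   <device>/ip_discovery/die/0/GC/0/{hw_id,major,minor,revision,...}
 *
 * "die" is a kset of ip_die_entry objects, each numeric hw_id directory is
 * a kset of ip_hw_instance objects, and the human-readable "GC" name is a
 * symlink created from hw_id_names[].
 */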
676struct ip_hw_instance {
677 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
678
679 int hw_id;
680 u8 num_instance;
681 u8 major, minor, revision;
682 u8 harvest;
683
684 int num_base_addresses;
685 u32 base_addr[] __counted_by(num_base_addresses);
686};
687
688struct ip_hw_id {
689 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
690 int hw_id;
691};
692
693struct ip_die_entry {
694 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
695 u16 num_ips;
696};
697
698/* -------------------------------------------------- */
699
700struct ip_hw_instance_attr {
701 struct attribute attr;
702 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
703};
704
705static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
706{
707 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
708}
709
710static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
711{
712 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
713}
714
715static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
716{
717 return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
718}
719
720static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
721{
722 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
723}
724
725static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
726{
727 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
728}
729
730static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
731{
732 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
733}
734
735static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
736{
737 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
738}
739
740static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
741{
742 ssize_t res, at;
743 int ii;
744
745 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
746 /* Here we satisfy the condition that at + size <= PAGE_SIZE.
747 */
748 if (at + 12 > PAGE_SIZE)
749 break;
750 res = sysfs_emit_at(buf, at, "0x%08X\n",
751 ip_hw_instance->base_addr[ii]);
752 if (res <= 0)
753 break;
754 at += res;
755 }
756
757 return res < 0 ? res : at;
758}
759
760static struct ip_hw_instance_attr ip_hw_attr[] = {
761 __ATTR_RO(hw_id),
762 __ATTR_RO(num_instance),
763 __ATTR_RO(major),
764 __ATTR_RO(minor),
765 __ATTR_RO(revision),
766 __ATTR_RO(harvest),
767 __ATTR_RO(num_base_addresses),
768 __ATTR_RO(base_addr),
769};
770
771static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
772ATTRIBUTE_GROUPS(ip_hw_instance);
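
/* ATTRIBUTE_GROUPS() generates the ip_hw_instance_groups table used by
 * ip_hw_instance_ktype below; the custom sysfs_ops then funnels every
 * show() through ip_hw_instance_attr_show(), so e.g. reading
 * .../GC/0/base_addr lands in base_addr_show().
 */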
773
774#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
775#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
776
777static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
778 struct attribute *attr,
779 char *buf)
780{
781 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
782 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
783
784 if (!ip_hw_attr->show)
785 return -EIO;
786
787 return ip_hw_attr->show(ip_hw_instance, buf);
788}
789
790static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
791 .show = ip_hw_instance_attr_show,
792};
793
794static void ip_hw_instance_release(struct kobject *kobj)
795{
796 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
797
798 kfree(ip_hw_instance);
799}
800
801static const struct kobj_type ip_hw_instance_ktype = {
802 .release = ip_hw_instance_release,
803 .sysfs_ops = &ip_hw_instance_sysfs_ops,
804 .default_groups = ip_hw_instance_groups,
805};
806
807/* -------------------------------------------------- */
808
809#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
810
811static void ip_hw_id_release(struct kobject *kobj)
812{
813 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
814
815 if (!list_empty(&ip_hw_id->hw_id_kset.list))
816 DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
817 kfree(ip_hw_id);
818}
819
820static const struct kobj_type ip_hw_id_ktype = {
821 .release = ip_hw_id_release,
822 .sysfs_ops = &kobj_sysfs_ops,
823};
824
825/* -------------------------------------------------- */
826
827static void die_kobj_release(struct kobject *kobj);
828static void ip_disc_release(struct kobject *kobj);
829
830struct ip_die_entry_attribute {
831 struct attribute attr;
832 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
833};
834
835#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
836
837static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
838{
839 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
840}
841
842/* If there are more ip_die_entry attrs, other than the number of IPs,
843 * we can make this into an array of attrs, and then initialize
844 * ip_die_entry_attrs in a loop.
845 */
846static struct ip_die_entry_attribute num_ips_attr =
847 __ATTR_RO(num_ips);
848
849static struct attribute *ip_die_entry_attrs[] = {
850 &num_ips_attr.attr,
851 NULL,
852};
853ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
854
855#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
856
857static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
858 struct attribute *attr,
859 char *buf)
860{
861 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
862 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
863
864 if (!ip_die_entry_attr->show)
865 return -EIO;
866
867 return ip_die_entry_attr->show(ip_die_entry, buf);
868}
869
870static void ip_die_entry_release(struct kobject *kobj)
871{
872 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
873
874 if (!list_empty(&ip_die_entry->ip_kset.list))
875 DRM_ERROR("ip_die_entry->ip_kset is not empty");
876 kfree(ip_die_entry);
877}
878
879static const struct sysfs_ops ip_die_entry_sysfs_ops = {
880 .show = ip_die_entry_attr_show,
881};
882
883static const struct kobj_type ip_die_entry_ktype = {
884 .release = ip_die_entry_release,
885 .sysfs_ops = &ip_die_entry_sysfs_ops,
886 .default_groups = ip_die_entry_groups,
887};
888
889static const struct kobj_type die_kobj_ktype = {
890 .release = die_kobj_release,
891 .sysfs_ops = &kobj_sysfs_ops,
892};
893
894static const struct kobj_type ip_discovery_ktype = {
895 .release = ip_disc_release,
896 .sysfs_ops = &kobj_sysfs_ops,
897};
898
899struct ip_discovery_top {
900 struct kobject kobj; /* ip_discovery/ */
901 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
902 struct amdgpu_device *adev;
903};
904
905static void die_kobj_release(struct kobject *kobj)
906{
907 struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
908 struct ip_discovery_top,
909 die_kset);
910 if (!list_empty(&ip_top->die_kset.list))
911 DRM_ERROR("ip_top->die_kset is not empty");
912}
913
914static void ip_disc_release(struct kobject *kobj)
915{
916 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
917 kobj);
918 struct amdgpu_device *adev = ip_top->adev;
919
920 adev->ip_top = NULL;
921 kfree(ip_top);
922}
923
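/* Harvest status is reconstructed from the instance masks populated while
 * parsing: an instance counts as harvested when its bit is missing from the
 * relevant mask. With a hypothetical xcc_mask of 0b0101, for instance,
 * inst 1 gives ((1 << 1) & 0b0101) == 0, i.e. harvest = 1.
 */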
924static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
925 uint16_t hw_id, uint8_t inst)
926{
927 uint8_t harvest = 0;
928
929 /* Until a uniform way is figured out, get the mask based on the hw_id */
930 switch (hw_id) {
931 case VCN_HWID:
932 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
933 break;
934 case DMU_HWID:
935 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
936 harvest = 0x1;
937 break;
938 case UMC_HWID:
939 /* TODO: It needs another parsing; for now, ignore.*/
940 break;
941 case GC_HWID:
942 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
943 break;
944 case SDMA0_HWID:
945 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
946 break;
947 default:
948 break;
949 }
950
951 return harvest;
952}
953
954static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
955 struct ip_die_entry *ip_die_entry,
956 const size_t _ip_offset, const int num_ips,
957 bool reg_base_64)
958{
959 int ii, jj, kk, res;
960
961 DRM_DEBUG("num_ips:%d", num_ips);
962
963 /* Find all IPs of a given HW ID, and add their instance to
964 * #die/#hw_id/#instance/<attributes>
965 */
966 for (ii = 0; ii < HW_ID_MAX; ii++) {
967 struct ip_hw_id *ip_hw_id = NULL;
968 size_t ip_offset = _ip_offset;
969
970 for (jj = 0; jj < num_ips; jj++) {
971 struct ip_v4 *ip;
972 struct ip_hw_instance *ip_hw_instance;
973
974 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
975 if (amdgpu_discovery_validate_ip(ip) ||
976 le16_to_cpu(ip->hw_id) != ii)
977 goto next_ip;
978
979 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
980
981 /* We have a hw_id match; register the hw
982 * block if not yet registered.
983 */
984 if (!ip_hw_id) {
985 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
986 if (!ip_hw_id)
987 return -ENOMEM;
988 ip_hw_id->hw_id = ii;
989
990 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
991 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
992 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
993 res = kset_register(&ip_hw_id->hw_id_kset);
994 if (res) {
995 DRM_ERROR("Couldn't register ip_hw_id kset");
996 kfree(ip_hw_id);
997 return res;
998 }
999 if (hw_id_names[ii]) {
1000 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1001 &ip_hw_id->hw_id_kset.kobj,
1002 hw_id_names[ii]);
1003 if (res) {
1004 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1005 hw_id_names[ii],
1006 kobject_name(&ip_die_entry->ip_kset.kobj));
1007 }
1008 }
1009 }
1010
1011 /* Now register its instance.
1012 */
1013 ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1014 base_addr,
1015 ip->num_base_address),
1016 GFP_KERNEL);
1017 if (!ip_hw_instance) {
1018 DRM_ERROR("no memory for ip_hw_instance");
1019 return -ENOMEM;
1020 }
1021 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1022 ip_hw_instance->num_instance = ip->instance_number;
1023 ip_hw_instance->major = ip->major;
1024 ip_hw_instance->minor = ip->minor;
1025 ip_hw_instance->revision = ip->revision;
1026 ip_hw_instance->harvest =
1027 amdgpu_discovery_get_harvest_info(
1028 adev, ip_hw_instance->hw_id,
1029 ip_hw_instance->num_instance);
1030 ip_hw_instance->num_base_addresses = ip->num_base_address;
1031
1032 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1033 if (reg_base_64)
1034 ip_hw_instance->base_addr[kk] =
1035 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1036 else
1037 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1038 }
1039
1040 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1041 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1042 res = kobject_add(&ip_hw_instance->kobj, NULL,
1043 "%d", ip_hw_instance->num_instance);
1044next_ip:
1045 if (reg_base_64)
1046 ip_offset += struct_size(ip, base_address_64,
1047 ip->num_base_address);
1048 else
1049 ip_offset += struct_size(ip, base_address,
1050 ip->num_base_address);
1051 }
1052 }
1053
1054 return 0;
1055}
1056
1057static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1058{
1059 struct binary_header *bhdr;
1060 struct ip_discovery_header *ihdr;
1061 struct die_header *dhdr;
1062 struct kset *die_kset = &adev->ip_top->die_kset;
1063 u16 num_dies, die_offset, num_ips;
1064 size_t ip_offset;
1065 int ii, res;
1066
1067 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1068 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1069 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1070 num_dies = le16_to_cpu(ihdr->num_dies);
1071
1072 DRM_DEBUG("number of dies: %d\n", num_dies);
1073
1074 for (ii = 0; ii < num_dies; ii++) {
1075 struct ip_die_entry *ip_die_entry;
1076
1077 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1078 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1079 num_ips = le16_to_cpu(dhdr->num_ips);
1080 ip_offset = die_offset + sizeof(*dhdr);
1081
1082 /* Add the die to the kset.
1083 *
1084 * dhdr->die_id == ii, which was checked in
1085 * amdgpu_discovery_reg_base_init().
1086 */
1087
1088 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1089 if (!ip_die_entry)
1090 return -ENOMEM;
1091
1092 ip_die_entry->num_ips = num_ips;
1093
1094 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1095 ip_die_entry->ip_kset.kobj.kset = die_kset;
1096 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1097 res = kset_register(&ip_die_entry->ip_kset);
1098 if (res) {
1099 DRM_ERROR("Couldn't register ip_die_entry kset");
1100 kfree(ip_die_entry);
1101 return res;
1102 }
1103
1104 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1105 }
1106
1107 return 0;
1108}
1109
1110static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1111{
1112 struct kset *die_kset;
1113 int res, ii;
1114
1115 if (!adev->mman.discovery_bin)
1116 return -EINVAL;
1117
1118 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1119 if (!adev->ip_top)
1120 return -ENOMEM;
1121
1122 adev->ip_top->adev = adev;
1123
1124 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1125 &adev->dev->kobj, "ip_discovery");
1126 if (res) {
1127 DRM_ERROR("Couldn't init and add ip_discovery/");
1128 goto Err;
1129 }
1130
1131 die_kset = &adev->ip_top->die_kset;
1132 kobject_set_name(&die_kset->kobj, "%s", "die");
1133 die_kset->kobj.parent = &adev->ip_top->kobj;
1134 die_kset->kobj.ktype = &die_kobj_ktype;
1135 res = kset_register(&adev->ip_top->die_kset);
1136 if (res) {
1137 DRM_ERROR("Couldn't register die_kset");
1138 goto Err;
1139 }
1140
1141 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1142 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1143 ip_hw_instance_attrs[ii] = NULL;
1144
1145 res = amdgpu_discovery_sysfs_recurse(adev);
1146
1147 return res;
1148Err:
1149 kobject_put(&adev->ip_top->kobj);
1150 return res;
1151}
1152
1153/* -------------------------------------------------- */
1154
1155#define list_to_kobj(el) container_of(el, struct kobject, entry)
1156
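/* Teardown note: list_lock is a spinlock but kobject_put() may sleep (sysfs
 * removal, release callbacks), so each loop below detaches one entry, drops
 * the lock, puts the kobject, and then re-takes the lock.
 */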
1157static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1158{
1159 struct list_head *el, *tmp;
1160 struct kset *hw_id_kset;
1161
1162 hw_id_kset = &ip_hw_id->hw_id_kset;
1163 spin_lock(&hw_id_kset->list_lock);
1164 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1165 list_del_init(el);
1166 spin_unlock(&hw_id_kset->list_lock);
1167 /* kobject is embedded in ip_hw_instance */
1168 kobject_put(list_to_kobj(el));
1169 spin_lock(&hw_id_kset->list_lock);
1170 }
1171 spin_unlock(&hw_id_kset->list_lock);
1172 kobject_put(&ip_hw_id->hw_id_kset.kobj);
1173}
1174
1175static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1176{
1177 struct list_head *el, *tmp;
1178 struct kset *ip_kset;
1179
1180 ip_kset = &ip_die_entry->ip_kset;
1181 spin_lock(&ip_kset->list_lock);
1182 list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1183 list_del_init(el);
1184 spin_unlock(&ip_kset->list_lock);
1185 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1186 spin_lock(&ip_kset->list_lock);
1187 }
1188 spin_unlock(&ip_kset->list_lock);
1189 kobject_put(&ip_die_entry->ip_kset.kobj);
1190}
1191
1192static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1193{
1194 struct list_head *el, *tmp;
1195 struct kset *die_kset;
1196
1197 die_kset = &adev->ip_top->die_kset;
1198 spin_lock(&die_kset->list_lock);
1199 list_for_each_prev_safe(el, tmp, &die_kset->list) {
1200 list_del_init(el);
1201 spin_unlock(&die_kset->list_lock);
1202 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1203 spin_lock(&die_kset->list_lock);
1204 }
1205 spin_unlock(&die_kset->list_lock);
1206 kobject_put(&adev->ip_top->die_kset.kobj);
1207 kobject_put(&adev->ip_top->kobj);
1208}
1209
1210/* ================================================== */
1211
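/* reg_base_init() turns the discovery blob into the two lookup structures
 * used by the rest of the driver: adev->reg_offset[hw_ip][instance], the
 * per-IP dword base addresses, and adev->ip_versions[hw_ip][instance],
 * packed via IP_VERSION_FULL(major, minor, revision, variant, subrev).
 */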
1212static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1213{
1214 uint8_t num_base_address, subrev, variant;
1215 struct binary_header *bhdr;
1216 struct ip_discovery_header *ihdr;
1217 struct die_header *dhdr;
1218 struct ip_v4 *ip;
1219 uint16_t die_offset;
1220 uint16_t ip_offset;
1221 uint16_t num_dies;
1222 uint16_t num_ips;
1223 int hw_ip;
1224 int i, j, k;
1225 int r;
1226
1227 r = amdgpu_discovery_init(adev);
1228 if (r) {
1229 DRM_ERROR("amdgpu_discovery_init failed\n");
1230 return r;
1231 }
1232
1233 adev->gfx.xcc_mask = 0;
1234 adev->sdma.sdma_mask = 0;
1235 adev->vcn.inst_mask = 0;
1236 adev->jpeg.inst_mask = 0;
1237 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1238 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1239 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1240 num_dies = le16_to_cpu(ihdr->num_dies);
1241
1242 DRM_DEBUG("number of dies: %d\n", num_dies);
1243
1244 for (i = 0; i < num_dies; i++) {
1245 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1246 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1247 num_ips = le16_to_cpu(dhdr->num_ips);
1248 ip_offset = die_offset + sizeof(*dhdr);
1249
1250 if (le16_to_cpu(dhdr->die_id) != i) {
1251 DRM_ERROR("invalid die id %d, expected %d\n",
1252 le16_to_cpu(dhdr->die_id), i);
1253 return -EINVAL;
1254 }
1255
1256 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1257 le16_to_cpu(dhdr->die_id), num_ips);
1258
1259 for (j = 0; j < num_ips; j++) {
1260 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1261
1262 if (amdgpu_discovery_validate_ip(ip))
1263 goto next_ip;
1264
1265 num_base_address = ip->num_base_address;
1266
1267 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1268 hw_id_names[le16_to_cpu(ip->hw_id)],
1269 le16_to_cpu(ip->hw_id),
1270 ip->instance_number,
1271 ip->major, ip->minor,
1272 ip->revision);
1273
1274 if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1275 /* Bit [5:0]: original revision value
1276 * Bit [7:6]: en/decode capability:
1277 * 0b00 : VCN function normally
1278 * 0b10 : encode is disabled
1279 * 0b01 : decode is disabled
1280 */
1281 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1282 ip->revision & 0xc0;
1283 ip->revision &= ~0xc0;
1284 if (adev->vcn.num_vcn_inst <
1285 AMDGPU_MAX_VCN_INSTANCES) {
1286 adev->vcn.num_vcn_inst++;
1287 adev->vcn.inst_mask |=
1288 (1U << ip->instance_number);
1289 adev->jpeg.inst_mask |=
1290 (1U << ip->instance_number);
1291 } else {
1292 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1293 adev->vcn.num_vcn_inst + 1,
1294 AMDGPU_MAX_VCN_INSTANCES);
1295 }
1296 }
1297 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1298 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1299 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1300 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1301 if (adev->sdma.num_instances <
1302 AMDGPU_MAX_SDMA_INSTANCES) {
1303 adev->sdma.num_instances++;
1304 adev->sdma.sdma_mask |=
1305 (1U << ip->instance_number);
1306 } else {
1307 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1308 adev->sdma.num_instances + 1,
1309 AMDGPU_MAX_SDMA_INSTANCES);
1310 }
1311 }
1312
1313 if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1314 adev->gmc.num_umc++;
1315 adev->umc.node_inst_num++;
1316 }
1317
1318 if (le16_to_cpu(ip->hw_id) == GC_HWID)
1319 adev->gfx.xcc_mask |=
1320 (1U << ip->instance_number);
1321
1322 for (k = 0; k < num_base_address; k++) {
1323 /*
1324 * convert the endianness of base addresses in place,
1325 * so that we don't need to convert them when accessing adev->reg_offset.
1326 */
1327 if (ihdr->base_addr_64_bit)
1328 /* Truncate the 64bit base address from ip discovery
1329 * and only store lower 32bit ip base in reg_offset[].
1330 * Bits > 32 follows ASIC specific format, thus just
1331 * discard them and handle it within specific ASIC.
1332 * By this way reg_offset[] and related helpers can
1333 * stay unchanged.
1334 * The base address is in dwords, thus clear the
1335 * highest 2 bits to store.
1336 */
1337 ip->base_address[k] =
1338 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1339 else
1340 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1341 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1342 }
1343
1344 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1345 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1346 hw_id_map[hw_ip] != 0) {
1347 DRM_DEBUG("set register base offset for %s\n",
1348 hw_id_names[le16_to_cpu(ip->hw_id)]);
1349 adev->reg_offset[hw_ip][ip->instance_number] =
1350 ip->base_address;
1351 /* Instance support is somewhat inconsistent.
1352 * SDMA is a good example. Sienna cichlid has 4 total
1353 * SDMA instances, each enumerated separately (HWIDs
1354 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1355 * but they are enumerated as multiple instances of the
1356 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1357 * example. On most chips there are multiple instances
1358 * with the same HWID.
1359 */
1360
1361 if (ihdr->version < 3) {
1362 subrev = 0;
1363 variant = 0;
1364 } else {
1365 subrev = ip->sub_revision;
1366 variant = ip->variant;
1367 }
1368
1369 adev->ip_versions[hw_ip]
1370 [ip->instance_number] =
1371 IP_VERSION_FULL(ip->major,
1372 ip->minor,
1373 ip->revision,
1374 variant,
1375 subrev);
1376 }
1377 }
1378
1379next_ip:
1380 if (ihdr->base_addr_64_bit)
1381 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1382 else
1383 ip_offset += struct_size(ip, base_address, ip->num_base_address);
1384 }
1385 }
1386
1387 return 0;
1388}
1389
1390static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1391{
1392 int vcn_harvest_count = 0;
1393 int umc_harvest_count = 0;
1394
1395 /*
1396 * Harvest table does not fit Navi1x and legacy GPUs,
1397 * so read harvest bit per IP data structure to set
1398 * harvest configuration.
1399 */
1400 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1401 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
1402 if ((adev->pdev->device == 0x731E &&
1403 (adev->pdev->revision == 0xC6 ||
1404 adev->pdev->revision == 0xC7)) ||
1405 (adev->pdev->device == 0x7340 &&
1406 adev->pdev->revision == 0xC9) ||
1407 (adev->pdev->device == 0x7360 &&
1408 adev->pdev->revision == 0xC7))
1409 amdgpu_discovery_read_harvest_bit_per_ip(adev,
1410 &vcn_harvest_count);
1411 } else {
1412 amdgpu_discovery_read_from_harvest_table(adev,
1413 &vcn_harvest_count,
1414 &umc_harvest_count);
1415 }
1416
1417 amdgpu_discovery_harvest_config_quirk(adev);
1418
1419 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1420 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1421 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1422 }
1423
1424 if (umc_harvest_count < adev->gmc.num_umc) {
1425 adev->gmc.num_umc -= umc_harvest_count;
1426 }
1427}
1428
1429union gc_info {
1430 struct gc_info_v1_0 v1;
1431 struct gc_info_v1_1 v1_1;
1432 struct gc_info_v1_2 v1_2;
1433 struct gc_info_v2_0 v2;
1434 struct gc_info_v2_1 v2_1;
1435};
1436
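/* union gc_info follows the versioned-table pattern also used by mall_info
 * and vcn_info below: every variant begins with the same table header, so
 * the parser may safely read v1.header.version_major first and only then
 * commit to a particular layout.
 */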
1437static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1438{
1439 struct binary_header *bhdr;
1440 union gc_info *gc_info;
1441 u16 offset;
1442
1443 if (!adev->mman.discovery_bin) {
1444 DRM_ERROR("ip discovery uninitialized\n");
1445 return -EINVAL;
1446 }
1447
1448 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1449 offset = le16_to_cpu(bhdr->table_list[GC].offset);
1450
1451 if (!offset)
1452 return 0;
1453
1454 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1455
1456 switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1457 case 1:
1458 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1459 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1460 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1461 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1462 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1463 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1464 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1465 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1466 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1467 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1468 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1469 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1470 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1471 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1472 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1473 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1474 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1475 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1476 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1477 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1478 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1479 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1480 }
1481 if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1482 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1483 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1484 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1485 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1486 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1487 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1488 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1489 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1490 }
1491 break;
1492 case 2:
1493 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1494 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1495 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1496 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1497 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1498 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1499 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1500 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1501 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1502 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1503 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1504 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1505 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1506 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1507 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1508 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1509 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1510 if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1511 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1512 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1513 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1514 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1515 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1516 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1517 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1518 }
1519 break;
1520 default:
1521 dev_err(adev->dev,
1522 "Unhandled GC info table %d.%d\n",
1523 le16_to_cpu(gc_info->v1.header.version_major),
1524 le16_to_cpu(gc_info->v1.header.version_minor));
1525 return -EINVAL;
1526 }
1527 return 0;
1528}
1529
1530union mall_info {
1531 struct mall_info_v1_0 v1;
1532 struct mall_info_v2_0 v2;
1533};
1534
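/* Worked example for the v1 path below, using hypothetical numbers: with
 * num_umc = 2, mall_size_per_m = 2 MiB, bit 0 set in m_s_present and bit 1
 * set in m_half_use, the total would be 2 * 2 MiB + 2 MiB / 2 = 5 MiB.
 */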
1535static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1536{
1537 struct binary_header *bhdr;
1538 union mall_info *mall_info;
1539 u32 u, mall_size_per_umc, m_s_present, half_use;
1540 u64 mall_size;
1541 u16 offset;
1542
1543 if (!adev->mman.discovery_bin) {
1544 DRM_ERROR("ip discovery uninitialized\n");
1545 return -EINVAL;
1546 }
1547
1548 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1549 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1550
1551 if (!offset)
1552 return 0;
1553
1554 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1555
1556 switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1557 case 1:
1558 mall_size = 0;
1559 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1560 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1561 half_use = le32_to_cpu(mall_info->v1.m_half_use);
1562 for (u = 0; u < adev->gmc.num_umc; u++) {
1563 if (m_s_present & (1 << u))
1564 mall_size += mall_size_per_umc * 2;
1565 else if (half_use & (1 << u))
1566 mall_size += mall_size_per_umc / 2;
1567 else
1568 mall_size += mall_size_per_umc;
1569 }
1570 adev->gmc.mall_size = mall_size;
1571 adev->gmc.m_half_use = half_use;
1572 break;
1573 case 2:
1574 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1575 adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1576 break;
1577 default:
1578 dev_err(adev->dev,
1579 "Unhandled MALL info table %d.%d\n",
1580 le16_to_cpu(mall_info->v1.header.version_major),
1581 le16_to_cpu(mall_info->v1.header.version_minor));
1582 return -EINVAL;
1583 }
1584 return 0;
1585}
1586
1587union vcn_info {
1588 struct vcn_info_v1_0 v1;
1589};
1590
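/* Each VCN instance entry carries fuse data; fuse_data.all_bits is cached
 * in vcn_codec_disable_mask so the VCN code can later tell which codec
 * capabilities were fused off on this part.
 */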
1591static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1592{
1593 struct binary_header *bhdr;
1594 union vcn_info *vcn_info;
1595 u16 offset;
1596 int v;
1597
1598 if (!adev->mman.discovery_bin) {
1599 DRM_ERROR("ip discovery uninitialized\n");
1600 return -EINVAL;
1601 }
1602
1603 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1604 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1605 * but that may change in the future with new GPUs so keep this
1606 * check for defensive purposes.
1607 */
1608 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1609 dev_err(adev->dev, "invalid vcn instances\n");
1610 return -EINVAL;
1611 }
1612
1613 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1614 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1615
1616 if (!offset)
1617 return 0;
1618
1619 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1620
1621 switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1622 case 1:
1623 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1624 * so this won't overflow.
1625 */
1626 for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1627 adev->vcn.vcn_codec_disable_mask[v] =
1628 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1629 }
1630 break;
1631 default:
1632 dev_err(adev->dev,
1633 "Unhandled VCN info table %d.%d\n",
1634 le16_to_cpu(vcn_info->v1.header.version_major),
1635 le16_to_cpu(vcn_info->v1.header.version_minor));
1636 return -EINVAL;
1637 }
1638 return 0;
1639}
1640
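/* The set_*_ip_blocks() helpers below all share one dispatch pattern:
 * switch on a discovered IP version and register the matching IP block,
 * e.g. GC 10.3.x selects nv_common_ip_block while GC 11.x selects
 * soc21_common_ip_block.
 */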
1641static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1642{
1643 /* what IP to use for this? */
1644 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1645 case IP_VERSION(9, 0, 1):
1646 case IP_VERSION(9, 1, 0):
1647 case IP_VERSION(9, 2, 1):
1648 case IP_VERSION(9, 2, 2):
1649 case IP_VERSION(9, 3, 0):
1650 case IP_VERSION(9, 4, 0):
1651 case IP_VERSION(9, 4, 1):
1652 case IP_VERSION(9, 4, 2):
1653 case IP_VERSION(9, 4, 3):
1654 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1655 break;
1656 case IP_VERSION(10, 1, 10):
1657 case IP_VERSION(10, 1, 1):
1658 case IP_VERSION(10, 1, 2):
1659 case IP_VERSION(10, 1, 3):
1660 case IP_VERSION(10, 1, 4):
1661 case IP_VERSION(10, 3, 0):
1662 case IP_VERSION(10, 3, 1):
1663 case IP_VERSION(10, 3, 2):
1664 case IP_VERSION(10, 3, 3):
1665 case IP_VERSION(10, 3, 4):
1666 case IP_VERSION(10, 3, 5):
1667 case IP_VERSION(10, 3, 6):
1668 case IP_VERSION(10, 3, 7):
1669 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1670 break;
1671 case IP_VERSION(11, 0, 0):
1672 case IP_VERSION(11, 0, 1):
1673 case IP_VERSION(11, 0, 2):
1674 case IP_VERSION(11, 0, 3):
1675 case IP_VERSION(11, 0, 4):
1676 case IP_VERSION(11, 5, 0):
1677 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1678 break;
1679 default:
1680 dev_err(adev->dev,
1681 "Failed to add common ip block(GC_HWIP:0x%x)\n",
1682 amdgpu_ip_version(adev, GC_HWIP, 0));
1683 return -EINVAL;
1684 }
1685 return 0;
1686}
1687
1688static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1689{
1690 /* use GC or MMHUB IP version */
1691 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1692 case IP_VERSION(9, 0, 1):
1693 case IP_VERSION(9, 1, 0):
1694 case IP_VERSION(9, 2, 1):
1695 case IP_VERSION(9, 2, 2):
1696 case IP_VERSION(9, 3, 0):
1697 case IP_VERSION(9, 4, 0):
1698 case IP_VERSION(9, 4, 1):
1699 case IP_VERSION(9, 4, 2):
1700 case IP_VERSION(9, 4, 3):
1701 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1702 break;
1703 case IP_VERSION(10, 1, 10):
1704 case IP_VERSION(10, 1, 1):
1705 case IP_VERSION(10, 1, 2):
1706 case IP_VERSION(10, 1, 3):
1707 case IP_VERSION(10, 1, 4):
1708 case IP_VERSION(10, 3, 0):
1709 case IP_VERSION(10, 3, 1):
1710 case IP_VERSION(10, 3, 2):
1711 case IP_VERSION(10, 3, 3):
1712 case IP_VERSION(10, 3, 4):
1713 case IP_VERSION(10, 3, 5):
1714 case IP_VERSION(10, 3, 6):
1715 case IP_VERSION(10, 3, 7):
1716 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1717 break;
1718 case IP_VERSION(11, 0, 0):
1719 case IP_VERSION(11, 0, 1):
1720 case IP_VERSION(11, 0, 2):
1721 case IP_VERSION(11, 0, 3):
1722 case IP_VERSION(11, 0, 4):
1723 case IP_VERSION(11, 5, 0):
1724 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1725 break;
1726 default:
1727 dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1728 amdgpu_ip_version(adev, GC_HWIP, 0));
1729 return -EINVAL;
1730 }
1731 return 0;
1732}
1733
1734static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1735{
1736 switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1737 case IP_VERSION(4, 0, 0):
1738 case IP_VERSION(4, 0, 1):
1739 case IP_VERSION(4, 1, 0):
1740 case IP_VERSION(4, 1, 1):
1741 case IP_VERSION(4, 3, 0):
1742 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1743 break;
1744 case IP_VERSION(4, 2, 0):
1745 case IP_VERSION(4, 2, 1):
1746 case IP_VERSION(4, 4, 0):
1747 case IP_VERSION(4, 4, 2):
1748 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1749 break;
1750 case IP_VERSION(5, 0, 0):
1751 case IP_VERSION(5, 0, 1):
1752 case IP_VERSION(5, 0, 2):
1753 case IP_VERSION(5, 0, 3):
1754 case IP_VERSION(5, 2, 0):
1755 case IP_VERSION(5, 2, 1):
1756 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1757 break;
1758 case IP_VERSION(6, 0, 0):
1759 case IP_VERSION(6, 0, 1):
1760 case IP_VERSION(6, 0, 2):
1761 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1762 break;
1763 case IP_VERSION(6, 1, 0):
1764 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1765 break;
1766 default:
1767 dev_err(adev->dev,
1768 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1769 amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1770 return -EINVAL;
1771 }
1772 return 0;
1773}
1774
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

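/*
 * Select the SMU (power management) IP block based on the MP1 IP version.
 * MP1 11.0.2 is shared by Vega20 and Arcturus; only Arcturus uses the
 * swSMU v11 block, the remaining older parts stay on legacy powerplay.
 */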
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

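/* Under SR-IOV, display is routed through the virtual KMS IP block. */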
#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

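/*
 * Select the display IP block: virtual KMS when virtual display is
 * enabled, otherwise DC keyed on the DCE or DCI IP version.
 */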
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCE_HWIP, 0));
			return -EINVAL;
		}
	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCI_HWIP, 0));
			return -EINVAL;
		}
	}
#endif
	return 0;
}

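/* Select the GFX IP block based on the GC IP version. */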
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

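/* Select the SDMA IP block based on the SDMA0 IP version. */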
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

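/*
 * Select the multimedia IP blocks: legacy UVD/VCE when a VCE IP is
 * present, VCN (plus JPEG where applicable) otherwise.
 */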
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, VCE_HWIP, 0));
			return -EINVAL;
		}
	} else {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		case IP_VERSION(4, 0, 5):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
	}
	return 0;
}

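/*
 * Enable the MicroEngine Scheduler (MES): opt-in via the amdgpu_mes
 * module parameter on GC 10.x, always enabled on GC 11.x.
 */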
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

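/* Per-SoC configuration hook; currently only GC 9.4.3 (aqua vanjaram) has one. */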
static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		aqua_vanjaram_init_soc_config(adev);
		break;
	default:
		break;
	}
}

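/* Add the VPE (Video Processing Engine) IP block where one is present. */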
static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

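/*
 * Enable the user mode scheduler for multimedia (UMSCH MM) when bit 0
 * of the amdgpu_umsch_mm module parameter is set.
 */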
static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

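/*
 * Top-level entry point. Pre-discovery ASICs get their IP versions from
 * the hardcoded tables below; newer ASICs read them from the IP discovery
 * table. The per-IP helpers above then register the matching IP blocks
 * in initialization order.
 */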
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	case IP_VERSION(11, 5, 0):
		adev->family = AMDGPU_FAMILY_GC_11_5_0;
		break;
	default:
		return -EINVAL;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);

	/* set NBIO version */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 11, 0):
		adev->nbio.funcs = &nbio_v7_11_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

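	/* set HDP version */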
	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

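	/* set DF version */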
	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	case IP_VERSION(4, 6, 2):
		adev->df.funcs = &df_v4_6_2_funcs;
		break;
	default:
		break;
	}

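	/* set SMUIO version */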
	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
			adev->flags |= AMD_IS_APU;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

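	/* set LSDMA version */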
	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	    !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}