/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

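/*
 * Worked example for XCP_INST_MASK below (illustrative values): with
 * num_inst = 2 and xcp_id = 1, GENMASK(1, 0) = 0x3 is shifted left by
 * 1 * 2, giving an instance mask of 0xc, i.e. instances 2 and 3 belong
 * to partition 1. A num_inst of 0 yields an empty mask.
 */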
#define XCP_INST_MASK(num_inst, xcp_id)                                        \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

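	/*
	 * Layout sketch, derived from the values below: the 20-doorbell
	 * SDMA range is split in half per engine, so each engine gets a
	 * stride of 10; e.g. engine 2 starts at
	 * AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + 20.
	 */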
	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

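/*
 * VCN instances are shared when there are more partitions than VCN
 * instances, e.g. CPX with 8 XCPs on a part with 4 VCN instances
 * (illustrative counts; the actual numbers depend on the config).
 */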
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
			     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
			sel_xcp_id, ring->funcs->type,
			ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

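/*
 * Scheduler selection sketch: a client with no partition assigned yet is
 * placed on the least-referenced partition; e.g. with ref counts
 * {2, 0, 1} across three XCPs (illustrative values), partition 1 is
 * chosen and its ref count bumped.
 */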
static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
					 enum amd_hw_ip_block_type block,
					 int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs. */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

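/*
 * Illustrative mapping: if logical instances 0 and 2 map to device
 * instances 1 and 3 (hypothetical ip_map contents), a logical mask of
 * 0x5 translates to a device mask of 0xa.
 */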
static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
					 enum amd_hw_ip_block_type block,
					 uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for smn addressing on different AIDs:
 *   bit[34]: indicates cross-AID access
 *   bit[33:32]: indicates the target AID id
 * AID id range is 0 ~ 3 as the maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing, bits [34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host; accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}
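
/*
 * Worked example for aqua_vanjaram_encode_ext_smn_addressing(): ext_id = 2
 * gives ((u64)2 << 32) | (1ULL << 34) = 0x600000000, i.e. the cross-AID
 * bit plus AID id 2 in bits [33:32].
 */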

static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}
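
/*
 * E.g. 8 XCCs with 4 XCCs per XCP (illustrative values) give mode = 2,
 * i.e. AMDGPU_DPX_PARTITION_MODE; a single XCC per XCP short-circuits
 * to CPX above.
 */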

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported: %d, derived: %d",
				mode, derv_mode);
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				    enum AMDGPU_XCP_IP_BLOCK ip_id,
				    struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

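	/*
	 * Sharing sketch (illustrative counts): with 8 XCPs and 4 VCN
	 * instances, num_shared_vcn = 2, so XCP pairs {0,1}, {2,3}, ...
	 * resolve to the same VCN instance via xcp_id / num_shared_vcn.
	 */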
	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcp = 1;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcp = 2;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcp = 3;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcp = 4;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

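	/*
	 * Example with illustrative counts: in CPX on 8 XCCs with 4
	 * decoders, res_lt_xcp is true for DEC, so each XCP sees one
	 * decoder shared by 8 / 4 = 2 partitions; JPEG instances are
	 * additionally scaled by the per-instance ring count.
	 */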
	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		if (i == AMDGPU_XCP_RES_JPEG)
			xcp_cfg->xcp_res[i].num_inst *=
				adev->jpeg.num_jpeg_rings;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
		       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}

	return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

static void
__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;

	default:
		break;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

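/*
 * Worked example with illustrative numbers, assuming
 * num_xcp_per_mem_partition is num_xcps / num_mem_partitions: in CPX on
 * 8 XCCs with 4 memory partitions, num_xcc_per_xcp = 1 and
 * num_xcp_per_mem_partition = 2, so xcc_id 5 maps to XCP 5 and
 * mem_id = 5 / 2 = 2.
 */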
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     enum AMDGPU_XCP_IP_BLOCK ip_id,
				     struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list =
		&aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	__aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

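	/*
	 * An AID is counted as present when all four of its SDMA
	 * instances appear in the mask, or either instance pair (0x3 or
	 * 0xc) does; e.g. sdma_mask 0xff on a two-AID part (illustrative
	 * value) yields aid_mask 0x3.
	 */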
	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
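
/*
 * E.g. link 3 of smnreg_0x11A00180 encodes to
 * 0x11A00180 | (3 << 20) = 0x11D00180.
 */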

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must hold all max_xgmi_instances of xgmi regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}
1223
1224ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
1225				    enum amdgpu_reg_state reg_state, void *buf,
1226				    size_t max_size)
1227{
1228	ssize_t size;
1229
1230	switch (reg_state) {
1231	case AMDGPU_REG_STATE_TYPE_PCIE:
1232		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
1233		break;
1234	case AMDGPU_REG_STATE_TYPE_XGMI:
1235		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
1236		break;
1237	case AMDGPU_REG_STATE_TYPE_WAFL:
1238		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
1239		break;
1240	case AMDGPU_REG_STATE_TYPE_USR:
1241		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
1242						    AMDGPU_REG_STATE_TYPE_USR);
1243		break;
1244	case AMDGPU_REG_STATE_TYPE_USR_1:
1245		size = aqua_vanjaram_read_usr_state(
1246			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
1247		break;
1248	default:
1249		return -EINVAL;
1250	}
1251
1252	return size;
1253}
v6.8
   1/*
   2 * Copyright 2022 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "amdgpu.h"
  24#include "soc15.h"
  25
  26#include "soc15_common.h"
  27#include "amdgpu_reg_state.h"
  28#include "amdgpu_xcp.h"
  29#include "gfx_v9_4_3.h"
  30#include "gfxhub_v1_2.h"
  31#include "sdma_v4_4_2.h"
  32
  33#define XCP_INST_MASK(num_inst, xcp_id)                                        \
  34	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
  35
  36#define AMDGPU_XCP_OPS_KFD	(1 << 0)
  37
  38void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
  39{
  40	int i;
  41
  42	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
  43
  44	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
  45
  46	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
  47	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
  48	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
  49
  50	adev->doorbell_index.sdma_doorbell_range = 20;
  51	for (i = 0; i < adev->sdma.num_instances; i++)
  52		adev->doorbell_index.sdma_engine[i] =
  53			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
  54			i * (adev->doorbell_index.sdma_doorbell_range >> 1);
  55
  56	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
  57	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
  58
  59	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
  60	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
  61
  62	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
  63}
  64
 
 
 
 
 
  65static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
  66			     uint32_t inst_idx, struct amdgpu_ring *ring)
  67{
  68	int xcp_id;
  69	enum AMDGPU_XCP_IP_BLOCK ip_blk;
  70	uint32_t inst_mask;
  71
  72	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
 
 
  73	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
  74		return;
  75
  76	inst_mask = 1 << inst_idx;
  77
  78	switch (ring->funcs->type) {
  79	case AMDGPU_HW_IP_GFX:
  80	case AMDGPU_RING_TYPE_COMPUTE:
  81	case AMDGPU_RING_TYPE_KIQ:
  82		ip_blk = AMDGPU_XCP_GFX;
  83		break;
  84	case AMDGPU_RING_TYPE_SDMA:
  85		ip_blk = AMDGPU_XCP_SDMA;
  86		break;
  87	case AMDGPU_RING_TYPE_VCN_ENC:
  88	case AMDGPU_RING_TYPE_VCN_JPEG:
  89		ip_blk = AMDGPU_XCP_VCN;
  90		if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
  91			inst_mask = 1 << (inst_idx * 2);
  92		break;
  93	default:
  94		DRM_ERROR("Not support ring type %d!", ring->funcs->type);
  95		return;
  96	}
  97
  98	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
  99		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
 100			ring->xcp_id = xcp_id;
 
 
 
 
 101			break;
 102		}
 103	}
 104}
 105
 106static void aqua_vanjaram_xcp_gpu_sched_update(
 107		struct amdgpu_device *adev,
 108		struct amdgpu_ring *ring,
 109		unsigned int sel_xcp_id)
 110{
 111	unsigned int *num_gpu_sched;
 112
 113	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
 114			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
 115	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
 116			.sched[(*num_gpu_sched)++] = &ring->sched;
 117	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
 118			sel_xcp_id, ring->funcs->type,
 119			ring->hw_prio, *num_gpu_sched);
 120}
 121
 122static int aqua_vanjaram_xcp_sched_list_update(
 123		struct amdgpu_device *adev)
 124{
 125	struct amdgpu_ring *ring;
 126	int i;
 127
 128	for (i = 0; i < MAX_XCP; i++) {
 129		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
 130		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
 131	}
 132
 133	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
 134		return 0;
 135
 136	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 137		ring = adev->rings[i];
 138		if (!ring || !ring->sched.ready || ring->no_scheduler)
 139			continue;
 140
 141		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
 142
 143		/* VCN is shared by two partitions under CPX MODE */
 
 
 144		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
 145			ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
 146			adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
 147			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
 148	}
 149
 150	return 0;
 151}
 152
 153static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
 154{
 155	int i;
 156
 157	for (i = 0; i < adev->num_rings; i++) {
 158		struct amdgpu_ring *ring = adev->rings[i];
 159
 160		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
 161			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
 162			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
 163		else
 164			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
 165	}
 166
 167	return aqua_vanjaram_xcp_sched_list_update(adev);
 168}
 169
 170static int aqua_vanjaram_select_scheds(
 171		struct amdgpu_device *adev,
 172		u32 hw_ip,
 173		u32 hw_prio,
 174		struct amdgpu_fpriv *fpriv,
 175		unsigned int *num_scheds,
 176		struct drm_gpu_scheduler ***scheds)
 177{
 178	u32 sel_xcp_id;
 179	int i;
 180
 181	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
 182		u32 least_ref_cnt = ~0;
 183
 184		fpriv->xcp_id = 0;
 185		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
 186			u32 total_ref_cnt;
 187
 188			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
 189			if (total_ref_cnt < least_ref_cnt) {
 190				fpriv->xcp_id = i;
 191				least_ref_cnt = total_ref_cnt;
 192			}
 193		}
 194	}
 195	sel_xcp_id = fpriv->xcp_id;
 196
 197	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
 198		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
 199		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
 200		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
 201		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
 202	} else {
 203		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
 204		return -ENOENT;
 205	}
 206
 207	return 0;
 208}
 209
 210static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
 211					 enum amd_hw_ip_block_type block,
 212					 int8_t inst)
 213{
 214	int8_t dev_inst;
 215
 216	switch (block) {
 217	case GC_HWIP:
 218	case SDMA0_HWIP:
 219	/* Both JPEG and VCN as JPEG is only alias of VCN */
 220	case VCN_HWIP:
 221		dev_inst = adev->ip_map.dev_inst[block][inst];
 222		break;
 223	default:
 224		/* For rest of the IPs, no look up required.
 225		 * Assume 'logical instance == physical instance' for all configs. */
 226		dev_inst = inst;
 227		break;
 228	}
 229
 230	return dev_inst;
 231}
 232
 233static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
 234					 enum amd_hw_ip_block_type block,
 235					 uint32_t mask)
 236{
 237	uint32_t dev_mask = 0;
 238	int8_t log_inst, dev_inst;
 239
 240	while (mask) {
 241		log_inst = ffs(mask) - 1;
 242		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
 243		dev_mask |= (1 << dev_inst);
 244		mask &= ~(1 << log_inst);
 245	}
 246
 247	return dev_mask;
 248}
 249
 250static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
 251					  enum amd_hw_ip_block_type ip_block,
 252					  uint32_t inst_mask)
 253{
 254	int l = 0, i;
 255
 256	while (inst_mask) {
 257		i = ffs(inst_mask) - 1;
 258		adev->ip_map.dev_inst[ip_block][l++] = i;
 259		inst_mask &= ~(1 << i);
 260	}
 261	for (; l < HWIP_MAX_INSTANCE; l++)
 262		adev->ip_map.dev_inst[ip_block][l] = -1;
 263}
 264
 265void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
 266{
 267	u32 ip_map[][2] = {
 268		{ GC_HWIP, adev->gfx.xcc_mask },
 269		{ SDMA0_HWIP, adev->sdma.sdma_mask },
 270		{ VCN_HWIP, adev->vcn.inst_mask },
 271	};
 272	int i;
 273
 274	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
 275		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
 276
 277	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
 278	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
 279}
 280
 281/* Fixed pattern for smn addressing on different AIDs:
 282 *   bit[34]: indicate cross AID access
 283 *   bit[33:32]: indicate target AID id
 284 * AID id range is 0 ~ 3 as maximum AID number is 4.
 285 */
 286u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
 287{
 288	u64 ext_offset;
 289
 290	/* local routing and bit[34:32] will be zeros */
 291	if (ext_id == 0)
 292		return 0;
 293
 294	/* Initiated from host, accessing to all non-zero aids are cross traffic */
 295	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
 296
 297	return ext_offset;
 298}
 299
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 300static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 301{
 302	enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
 
 303	struct amdgpu_device *adev = xcp_mgr->adev;
 304
 305	if (adev->nbio.funcs->get_compute_partition_mode)
 
 
 
 
 
 306		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
 
 
 
 
 
 
 307
 308	return mode;
 309}
 310
 311static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
 312{
 313	int num_xcc, num_xcc_per_xcp = 0;
 314
 315	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
 316
 317	switch (mode) {
 318	case AMDGPU_SPX_PARTITION_MODE:
 319		num_xcc_per_xcp = num_xcc;
 320		break;
 321	case AMDGPU_DPX_PARTITION_MODE:
 322		num_xcc_per_xcp = num_xcc / 2;
 323		break;
 324	case AMDGPU_TPX_PARTITION_MODE:
 325		num_xcc_per_xcp = num_xcc / 3;
 326		break;
 327	case AMDGPU_QPX_PARTITION_MODE:
 328		num_xcc_per_xcp = num_xcc / 4;
 329		break;
 330	case AMDGPU_CPX_PARTITION_MODE:
 331		num_xcc_per_xcp = 1;
 332		break;
 333	}
 334
 335	return num_xcc_per_xcp;
 336}
 337
 338static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
 339				    enum AMDGPU_XCP_IP_BLOCK ip_id,
 340				    struct amdgpu_xcp_ip *ip)
 341{
 342	struct amdgpu_device *adev = xcp_mgr->adev;
 
 343	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
 344	int num_sdma, num_vcn;
 345
 346	num_sdma = adev->sdma.num_instances;
 347	num_vcn = adev->vcn.num_vcn_inst;
 
 
 
 
 348
 349	switch (xcp_mgr->mode) {
 350	case AMDGPU_SPX_PARTITION_MODE:
 351		num_sdma_xcp = num_sdma;
 352		num_vcn_xcp = num_vcn;
 353		break;
 354	case AMDGPU_DPX_PARTITION_MODE:
 355		num_sdma_xcp = num_sdma / 2;
 356		num_vcn_xcp = num_vcn / 2;
 357		break;
 358	case AMDGPU_TPX_PARTITION_MODE:
 359		num_sdma_xcp = num_sdma / 3;
 360		num_vcn_xcp = num_vcn / 3;
 361		break;
 362	case AMDGPU_QPX_PARTITION_MODE:
 363		num_sdma_xcp = num_sdma / 4;
 364		num_vcn_xcp = num_vcn / 4;
 365		break;
 366	case AMDGPU_CPX_PARTITION_MODE:
 367		num_sdma_xcp = 2;
 368		num_vcn_xcp = num_vcn ? 1 : 0;
 369		break;
 370	default:
 371		return -EINVAL;
 372	}
 373
 374	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
 
 375
 376	switch (ip_id) {
 377	case AMDGPU_XCP_GFXHUB:
 378		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
 379		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
 380		break;
 381	case AMDGPU_XCP_GFX:
 382		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
 383		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
 384		break;
 385	case AMDGPU_XCP_SDMA:
 386		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
 387		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
 388		break;
 389	case AMDGPU_XCP_VCN:
 390		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
 
 391		/* TODO : Assign IP funcs */
 392		break;
 393	default:
 394		return -EINVAL;
 395	}
 396
 397	ip->ip_id = ip_id;
 398
 399	return 0;
 400}
 401
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 402static enum amdgpu_gfx_partition
 403__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 404{
 405	struct amdgpu_device *adev = xcp_mgr->adev;
 406	int num_xcc;
 407
 408	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
 409
 410	if (adev->gmc.num_mem_partitions == 1)
 411		return AMDGPU_SPX_PARTITION_MODE;
 412
 413	if (adev->gmc.num_mem_partitions == num_xcc)
 414		return AMDGPU_CPX_PARTITION_MODE;
 415
 416	if (adev->gmc.num_mem_partitions == num_xcc / 2)
 417		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
 418						    AMDGPU_QPX_PARTITION_MODE;
 419
 420	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
 421		return AMDGPU_DPX_PARTITION_MODE;
 422
 423	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
 424}
 425
 426static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 427					  enum amdgpu_gfx_partition mode)
 428{
 429	struct amdgpu_device *adev = xcp_mgr->adev;
 430	int num_xcc, num_xccs_per_xcp;
 431
 432	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 433	switch (mode) {
 434	case AMDGPU_SPX_PARTITION_MODE:
 435		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
 436	case AMDGPU_DPX_PARTITION_MODE:
 437		return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
 438	case AMDGPU_TPX_PARTITION_MODE:
 439		return (adev->gmc.num_mem_partitions == 1 ||
 440			adev->gmc.num_mem_partitions == 3) &&
 441		       ((num_xcc % 3) == 0);
 442	case AMDGPU_QPX_PARTITION_MODE:
 443		num_xccs_per_xcp = num_xcc / 4;
 444		return (adev->gmc.num_mem_partitions == 1 ||
 445			adev->gmc.num_mem_partitions == 4) &&
 446		       (num_xccs_per_xcp >= 2);
 447	case AMDGPU_CPX_PARTITION_MODE:
 448		return ((num_xcc > 1) &&
 449		       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
 450		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
 451	default:
 452		return false;
 453	}
 454
 455	return false;
 456}
 457
 458static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
 459{
 460	/* TODO:
 461	 * Stop user queues and threads, and make sure GPU is empty of work.
 462	 */
 463
 464	if (flags & AMDGPU_XCP_OPS_KFD)
 465		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
 466
 467	return 0;
 468}
 469
 470static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
 471{
 472	int ret = 0;
 473
 474	if (flags & AMDGPU_XCP_OPS_KFD) {
 475		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
 476		amdgpu_amdkfd_device_init(xcp_mgr->adev);
 477		/* If KFD init failed, return failure */
 478		if (!xcp_mgr->adev->kfd.init_complete)
 479			ret = -EIO;
 480	}
 481
 482	return ret;
 483}
 484
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 485static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 486					       int mode, int *num_xcps)
 487{
 488	int num_xcc_per_xcp, num_xcc, ret;
 489	struct amdgpu_device *adev;
 490	u32 flags = 0;
 491
 492	adev = xcp_mgr->adev;
 493	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 494
 495	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
 496		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
 
 
 
 
 
 
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

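/*
 * Summary of the switch sequence above (a reading aid, not additional
 * driver code): lock KFD -> pre-switch (tear down KFD state) -> program
 * the new XCC-per-XCP grouping via gfx.funcs->switch_partition_mode ->
 * re-derive num_xcps = num_xcc / num_xcc_per_xcp and re-init the XCP
 * bookkeeping -> post-switch (re-probe and re-init KFD) -> unlock KFD.
 * KFD is only involved when it completed init and no reset is in flight.
 */
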
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}
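
/*
 * Worked example (illustrative values): in DPX on a part with 8 XCCs and
 * 2 memory partitions, num_xcc_per_xcp = 4 and num_xcp_per_mem_partition
 * = 1, so xcc_id 5 maps to mem_id = (5 / 4) / 1 = 1, i.e. the second
 * memory partition.
 */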

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list,
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 SDMA instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		if ((inst_mask & mask) == mask)
			adev->aid_mask |= (1 << i);
	}
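
	/*
	 * Worked example (illustrative): with sdma_mask = 0xff and
	 * num_inst_per_aid = 4, AID 0 is assumed present (aid_mask starts
	 * at 1) and the loop sees 0xf for AID 1, so aid_mask ends up as
	 * 0x3, i.e. two fully populated AIDs.
	 */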

	/* Harvest config is not used for aqua vanjaram. VCN and JPEG
	 * instances are addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smnreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smnreg_0x1A380088, 6, DW_ADDR_INCR },
};
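
/*
 * Sanity note: the per-range counts above sum to 1 + 1 + 6 + 6 = 14,
 * matching NUM_PCIE_SMN_REGS, which sizes the output buffer below.
 */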

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};
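
/*
 * The counts above sum to 1 + 16 + 4 + 1 + 1 + 1 + 1 = 25, matching
 * NUM_XGMI_SMN_REGS. Per-link addressing example: each link's register
 * bank is offset by l << 20, so XGMI_LINK_REG(smnreg_0x11A00180, 1)
 * evaluates to 0x11B00180.
 */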

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Check that the buffer can hold all XGMI instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_WAFL_SMN_REGS 5

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};
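
/*
 * As with XGMI above: 4 + 1 = 5 registers per WAFL instance, matching
 * NUM_WAFL_SMN_REGS, with the link index folded in at bit 20.
 */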

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};
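
/*
 * 4 * 4 + 2 * 2 = 20 registers per instance here, matching
 * NUM_USR_SMN_REGS.
 */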

#define NUM_USR1_SMN_REGS	46

static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};
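
/*
 * Likewise, 6 + 5 + 5 + 4 + 6 + 5 + 5 + 4 + 4 + 2 = 46 registers,
 * matching NUM_USR1_SMN_REGS.
 */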

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	/* Report the requested type so USR and USR_1 stay distinguishable */
	usr_reg_state->common_header.state_type = reg_state;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}
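
/*
 * Minimal usage sketch (illustrative only, compiled out): a caller would
 * typically over-allocate a buffer, request one state type, and treat a
 * negative return as an errno. The PAGE_SIZE buffer and the
 * kvzalloc()/kvfree() pairing are assumptions for demonstration, not a
 * pattern taken from this driver.
 */
#if 0
static void example_dump_pcie_state(struct amdgpu_device *adev)
{
	void *buf = kvzalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t size;

	if (!buf)
		return;

	size = aqua_vanjaram_get_reg_state(adev, AMDGPU_REG_STATE_TYPE_PCIE,
					   buf, PAGE_SIZE);
	if (size < 0)
		dev_warn(adev->dev, "reg state read failed: %zd\n", size);

	kvfree(buf);
}
#endif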