/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gfxhub_v1_2.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "mmhub_v1_8.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "umc_v12_0.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2

#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2                                                          0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX                                                 2

#define MAX_MEM_RANGES 8

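/*
 * Names for the UTCL2 client IDs reported in the CID field of
 * VM_L2_PROTECTION_FAULT_STATUS for GFX-hub faults; indexed directly by
 * the decoded CID in gmc_v9_0_process_interrupt() below.
 */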
static const char * const gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

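/*
 * Per-ASIC MMHUB client ID name tables. The first index is the CID field
 * of VM_L2_PROTECTION_FAULT_STATUS, the second is that register's RW bit,
 * so a client can be named differently for read and write faults (see the
 * mmhub_client_ids_*[cid][rw] lookups in gmc_v9_0_process_interrupt()).
 */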
static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};

static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[32+4][0] = "MPIO",
	[96+11][0] = "JPEG0",
	[96+12][0] = "VCN0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[160+1][0] = "XDP",
	[160+14][0] = "HDP",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[384+0][0] = "OSS",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[32+4][1] = "MPIO",
	[96+11][1] = "JPEG0",
	[96+12][1] = "VCN0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[160+1][1] = "XDP",
	[160+14][1] = "HDP",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
	[384+0][1] = "OSS",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

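/*
 * ECC control registers of the Vega-era UMC channels, listed as raw
 * offsets: each group of four entries appears to be one UMC instance with
 * its channels spaced 0x800 apart, and consecutive instances spaced
 * 0x40000 apart. The mask registers in the second table mirror this
 * layout at a 0x20 offset (0x...43e0 vs 0x...43c0).
 */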
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned int type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL
	 */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp &= ~bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp |= bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info *task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	unsigned int vmhub;
	u64 addr;
	uint32_t cam_index = 0;
	int ret, xcc_id = 0;
	uint32_t node_id;

	node_id = entry->node_id;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		vmhub = AMDGPU_MMHUB0(node_id / 4);
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		vmhub = AMDGPU_MMHUB1(0);
	} else {
		hub_name = "gfxhub0";
		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
				node_id);
			if (xcc_id < 0)
				xcc_id = 0;
		}
		vmhub = xcc_id;
	}
	hub = &adev->vmhub[vmhub];

	if (retry_fault) {
		if (adev->irq.retry_cam_enabled) {
			/* Delegate it to a different ring if the hardware hasn't
			 * already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			cam_index = entry->src_data[2] & 0x3ff;

			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						     addr, write_fault);
			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
			if (ret)
				return 1;
		} else {
			/* Process it only if it's the first fault for this address */
			if (entry->ih != &adev->irq.ih_soft &&
			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
				return 1;

			/* Delegate it to a different ring if the hardware hasn't
			 * already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			/* Try to handle the recoverable page faults by filling page
			 * tables
			 */
			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						   addr, write_fault))
				return 1;
		}
	}

	if (!printk_ratelimit())
		return 0;

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
		retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);

	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		dev_err(adev->dev,
			" for process %s pid %d thread %s pid %d)\n",
			task_info->process_name, task_info->tgid,
			task_info->task_name, task_info->pid);
		amdgpu_vm_put_task_info(task_info);
	}

	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",
			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
		case IP_VERSION(9, 0, 0):
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case IP_VERSION(9, 3, 0):
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case IP_VERSION(9, 4, 0):
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case IP_VERSION(9, 4, 1):
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 0):
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case IP_VERSION(1, 5, 0):
		case IP_VERSION(2, 4, 0):
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case IP_VERSION(1, 8, 0):
		case IP_VERSION(9, 4, 2):
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};


static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu &&
	    !adev->gmc.is_app_apu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidate semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		return false;

	return ((vmhub == AMDGPU_MMHUB0(0) ||
		 vmhub == AMDGPU_MMHUB1(0)) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	u32 j, inv_req, tmp, sem, req, ack, inst;
	const unsigned int eng = 17;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);

	hub = &adev->vmhub[vmhub];
	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	if (vmhub >= AMDGPU_MMHUB0(0))
		inst = GET_INST(GC, 0);
	else
		inst = vmhub;

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal
	 */
	if (adev->gfx.kiq[inst].ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, inst);
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so acquire a semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state and work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	if (vmhub >= AMDGPU_MMHUB0(0))
		WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst);
	else
		WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst);

	/*
	 * Issue a dummy read to wait for the ACK register to
	 * be cleared to avoid a false ACK due to the new fast
	 * GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
		RREG32_NO_KIQ(req);

	for (j = 0; j < adev->usec_timeout; j++) {
		if (vmhub >= AMDGPU_MMHUB0(0))
			tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst);
		else
			tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst);
		else
			WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int i, vmid;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								 &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
						       flush_type);
		} else {
			gmc_v9_0_flush_gpu_tlb(adev, vmid,
					       AMDGPU_GFXHUB(0),
					       flush_type);
		}
	}
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so acquire a semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state and work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->vm_hub == AMDGPU_MMHUB1(0))
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
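
/*
 * Illustrative sketch only (not called by the driver): composing a minimal
 * PTE for a 4K CPU-coherent system-memory page per the VEGA 10 layout
 * above. The AMDGPU_PTE_* helpers are the driver's existing bit
 * definitions and correspond one-to-one to the bit positions listed.
 */
static inline uint64_t gmc_v9_0_example_system_pte(uint64_t paddr)
{
	/* bits 47:12: 4k physical page base address */
	uint64_t pte = paddr & 0x0000FFFFFFFFF000ULL;

	pte |= AMDGPU_PTE_VALID;			/* bit 0 */
	pte |= AMDGPU_PTE_SYSTEM;			/* bit 1: system memory */
	pte |= AMDGPU_PTE_SNOOPED;			/* bit 2: CPU cache snooping */
	pte |= AMDGPU_PTE_READABLE;			/* bit 5 */
	pte |= AMDGPU_PTE_WRITEABLE;			/* bit 6 */
	pte |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);		/* bits 58:57 */

	return pte;
}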

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}
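
/*
 * The AMDGPU_VM_MTYPE_* values handled above come from userspace mapping
 * flags (e.g. via the GEM VA ioctl) and are translated here into the
 * MTYPE bits (58:57) of a VEGA 10 PTE, defaulting to MTYPE_NC.
 */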

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE) {
			*flags &= ~AMDGPU_PDE_PTE;
			if (!(*flags & AMDGPU_PTE_VALID))
				*addr |= 1 << PAGE_SHIFT;
		} else {
			*flags |= AMDGPU_PTE_TF;
		}
	}
}

static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
					 struct amdgpu_bo *bo,
					 struct amdgpu_bo_va_mapping *mapping,
					 uint64_t *flags)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
	unsigned int mtype_local, mtype;
	bool snoop = false;
	bool is_local;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		if (is_vram) {
			if (bo_adev == adev) {
				if (uncached)
					mtype = MTYPE_UC;
				else if (coherent)
					mtype = MTYPE_CC;
				else
					mtype = MTYPE_RW;
				/* FIXME: is this still needed? Or does
				 * amdgpu_ttm_tt_pde_flags already handle this?
				 */
				if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
					     IP_VERSION(9, 4, 2) ||
				     amdgpu_ip_version(adev, GC_HWIP, 0) ==
					     IP_VERSION(9, 4, 3)) &&
				    adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				if (uncached || coherent)
					mtype = MTYPE_UC;
				else
					mtype = MTYPE_NC;
				if (mapping->bo_va->is_xgmi)
					snoop = true;
			}
		} else {
			if (uncached || coherent)
				mtype = MTYPE_UC;
			else
				mtype = MTYPE_NC;
			/* FIXME: is this still needed? Or does
			 * amdgpu_ttm_tt_pde_flags already handle this?
			 */
			snoop = true;
		}
		break;
	case IP_VERSION(9, 4, 3):
		/* Only local VRAM BOs or system memory on non-NUMA APUs
		 * can be assumed to be local in their entirety. Choose
		 * MTYPE_NC as safe fallback for all system memory BOs on
		 * NUMA systems. Their MTYPE can be overridden per-page in
		 * gmc_v9_0_override_vm_pte_flags.
		 */
		mtype_local = MTYPE_RW;
		if (amdgpu_mtype_local == 1) {
			DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
			mtype_local = MTYPE_NC;
		} else if (amdgpu_mtype_local == 2) {
			DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
			mtype_local = MTYPE_CC;
		} else {
			DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
		}
		is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
			    num_possible_nodes() <= 1) ||
			   (is_vram && adev == bo_adev &&
			    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
		snoop = true;
		if (uncached) {
			mtype = MTYPE_UC;
		} else if (ext_coherent) {
			if (adev->rev_id)
				mtype = is_local ? MTYPE_CC : MTYPE_UC;
			else
				mtype = MTYPE_UC;
		} else if (adev->flags & AMD_IS_APU) {
			mtype = is_local ? mtype_local : MTYPE_NC;
		} else {
			/* dGPU */
			if (is_local)
				mtype = mtype_local;
			else if (is_vram)
				mtype = MTYPE_NC;
			else
				mtype = MTYPE_UC;
		}

		break;
	default:
		if (uncached || coherent)
			mtype = MTYPE_UC;
		else
			mtype = MTYPE_NC;

		/* FIXME: is this still needed? Or does
		 * amdgpu_ttm_tt_pde_flags already handle this?
		 */
		if (!is_vram)
			snoop = true;
	}

	if (mtype != MTYPE_NC)
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
			 AMDGPU_PTE_MTYPE_VG10(mtype);
	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->tbo.resource)
		gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
					     mapping, flags);
}

static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm,
					   uint64_t addr, uint64_t *flags)
{
	int local_node, nid;

	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
	 * memory can use more efficient MTYPEs.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
		return;

	/* Only direct-mapped memory allows us to determine the NUMA node from
	 * the DMA address.
	 */
	if (!adev->ram_is_direct_mapped) {
		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
		return;
	}

	/* MTYPE_NC is the same default and can be overridden.
	 * MTYPE_UC will be present if the memory is extended-coherent
	 * and can also be overridden.
	 */
	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) &&
	    (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) {
		dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
		return;
	}

	/* FIXME: Only supported on native mode for now. For carve-out, the
	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
	 * memory partitions are not associated with different NUMA nodes.
	 */
	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
	} else {
		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
		return;
	}

	/* Only handle real RAM. Mappings of PCIe resources don't have struct
	 * page or NUMA nodes.
	 */
	if (!page_is_ram(addr >> PAGE_SHIFT)) {
		dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
		return;
	}
	nid = pfn_to_nid(addr >> PAGE_SHIFT);
	dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
			    vm->mem_id, local_node, nid);
	if (nid == local_node) {
		uint64_t old_flags = *flags;

		if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
			AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
			unsigned int mtype_local = MTYPE_RW;

			if (amdgpu_mtype_local == 1)
				mtype_local = MTYPE_NC;
			else if (amdgpu_mtype_local == 2)
				mtype_local = MTYPE_CC;

			*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
				 AMDGPU_PTE_MTYPE_VG10(mtype_local);
		} else if (adev->rev_id) {
			/* MTYPE_UC case */
			*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
				 AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
		}

		dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
				    old_flags, *flags);
	}
}

static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned int size;

	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case IP_VERSION(2, 1, 0):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static enum amdgpu_memory_partition
gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
{
	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;

	if (adev->nbio.funcs->get_memory_partition_mode)
		mode = adev->nbio.funcs->get_memory_partition_mode(adev,
								   supp_modes);

	return mode;
}

static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return AMDGPU_NPS1_PARTITION_MODE;

	return gmc_v9_0_get_memory_partition(adev, NULL);
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
	.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 1, 2):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras = &umc_v6_7_ras;
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	case IP_VERSION(12, 0, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
		adev->umc.active_mask = adev->aid_mask;
		adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
			adev->umc.ras = &umc_v12_0_ras;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.funcs = &mmhub_v1_8_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras = &mmhub_v1_0_ras;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras = &mmhub_v9_4_ras;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras = &mmhub_v1_7_ras;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.ras = &mmhub_v1_8_ras;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
	else
		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras = &hdp_v4_0_ras;
}

static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;

	/* is UMC the right IP to check for MCA?  Maybe DF? */
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		if (!adev->gmc.xgmi.connected_to_cpu) {
			mca->mp0.ras = &mca_v3_0_mp0_ras;
			mca->mp1.ras = &mca_v3_0_mp1_ras;
			mca->mpio.ras = &mca_v3_0_mpio_ras;
		}
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras = &xgmi_ras;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
	 * in their IP discovery tables
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
		adev->gmc.xgmi.supported = true;

	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
		enum amdgpu_pkg_type pkg_type =
			adev->smuio.funcs->get_pkg_type(adev);
		/* On GFXIP 9.4.3 APUs there is no physical VRAM domain
		 * present, and the APU can be used in two possible modes:
		 *  - carveout mode
		 *  - native APU mode
		 * "is_app_apu" identifies an APU running in native mode.
		 */
		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
					!pci_resource_len(adev->pdev, 0));
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_ras_funcs(adev);
	gmc_v9_0_set_xgmi_ras_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Work around a performance drop issue when the VBIOS enables
	 * partial writes while disabling HBM ECC for vega10.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs &&
			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
		if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
			amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	if (!adev->gmc.is_app_apu) {
		adev->gmc.mc_vram_size =
			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	} else {
		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
		adev->gmc.mc_vram_size = 0;
	}
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here as it appears system reserved
	 * memory in host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((!amdgpu_sriov_vf(adev) &&
		(adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0):   /* DCE SG support */
		case IP_VERSION(9, 2, 2):   /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
1738
1739static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1740{
1741	int r;
1742
1743	if (adev->gart.bo) {
1744		WARN(1, "VEGA10 PCIE GART already initialized\n");
1745		return 0;
1746	}
1747
1748	if (adev->gmc.xgmi.connected_to_cpu) {
1749		adev->gmc.vmid0_page_table_depth = 1;
1750		adev->gmc.vmid0_page_table_block_size = 12;
1751	} else {
1752		adev->gmc.vmid0_page_table_depth = 0;
1753		adev->gmc.vmid0_page_table_block_size = 0;
1754	}
1755
1756	/* Initialize common gart structure */
1757	r = amdgpu_gart_init(adev);
1758	if (r)
1759		return r;
1760	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1761	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1762				 AMDGPU_PTE_EXECUTABLE;
1763
1764	if (!adev->gmc.real_vram_size) {
1765		dev_info(adev->dev, "Put GART in system memory for APU\n");
1766		r = amdgpu_gart_table_ram_alloc(adev);
1767		if (r)
1768			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
1769	} else {
1770		r = amdgpu_gart_table_vram_alloc(adev);
1771		if (r)
1772			return r;
1773
1774		if (adev->gmc.xgmi.connected_to_cpu)
1775			r = amdgpu_gmc_pdb0_alloc(adev);
1776	}
1777
1778	return r;
1779}
1780
1781/**
1782 * gmc_v9_0_save_registers - save registers
1783 *
1784 * @adev: amdgpu_device pointer
1785 *
1786 * This saves register values that must be
1787 * restored upon resume.
1788 */
1789static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1790{
1791	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1792	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
1793		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1794}
1795
1796static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
1797{
1798	enum amdgpu_memory_partition mode;
1799	u32 supp_modes;
1800	bool valid;
1801
1802	mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
1803
1804	/* The mode detected by hardware is not among the supported modes */
1805	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1806	    !(BIT(mode - 1) & supp_modes))
1807		return false;
1808
1809	switch (mode) {
1810	case UNKNOWN_MEMORY_PARTITION_MODE:
1811	case AMDGPU_NPS1_PARTITION_MODE:
1812		valid = (adev->gmc.num_mem_partitions == 1);
1813		break;
1814	case AMDGPU_NPS2_PARTITION_MODE:
1815		valid = (adev->gmc.num_mem_partitions == 2);
1816		break;
1817	case AMDGPU_NPS4_PARTITION_MODE:
1818		valid = (adev->gmc.num_mem_partitions == 3 ||
1819			 adev->gmc.num_mem_partitions == 4);
1820		break;
1821	default:
1822		valid = false;
1823	}
1824
1825	return valid;
1826}
1827
1828static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
1829{
1830	int i;
1831
1832	/* Check if node with id 'nid' is present in 'node_ids' array */
1833	for (i = 0; i < num_ids; ++i)
1834		if (node_ids[i] == nid)
1835			return true;
1836
1837	return false;
1838}
1839
1840static void
1841gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
1842			      struct amdgpu_mem_partition_info *mem_ranges)
1843{
1844	struct amdgpu_numa_info numa_info;
1845	int node_ids[MAX_MEM_RANGES];
1846	int num_ranges = 0, ret;
1847	int num_xcc, xcc_id;
1848	uint32_t xcc_mask;
1849
1850	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1851	xcc_mask = (1U << num_xcc) - 1;
1852
1853	for_each_inst(xcc_id, xcc_mask)	{
1854		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
1855		if (ret)
1856			continue;
1857
1858		if (numa_info.nid == NUMA_NO_NODE) {
1859			mem_ranges[0].size = numa_info.size;
1860			mem_ranges[0].numa.node = numa_info.nid;
1861			num_ranges = 1;
1862			break;
1863		}
1864
1865		if (gmc_v9_0_is_node_present(node_ids, num_ranges,
1866					     numa_info.nid))
1867			continue;
1868
1869		node_ids[num_ranges] = numa_info.nid;
1870		mem_ranges[num_ranges].numa.node = numa_info.nid;
1871		mem_ranges[num_ranges].size = numa_info.size;
1872		++num_ranges;
1873	}
1874
1875	adev->gmc.num_mem_partitions = num_ranges;
1876}
1877
1878static void
1879gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
1880			    struct amdgpu_mem_partition_info *mem_ranges)
1881{
1882	enum amdgpu_memory_partition mode;
1883	u32 start_addr = 0, size;
1884	int i;
1885
1886	mode = gmc_v9_0_query_memory_partition(adev);
1887
1888	switch (mode) {
1889	case UNKNOWN_MEMORY_PARTITION_MODE:
1890	case AMDGPU_NPS1_PARTITION_MODE:
1891		adev->gmc.num_mem_partitions = 1;
1892		break;
1893	case AMDGPU_NPS2_PARTITION_MODE:
1894		adev->gmc.num_mem_partitions = 2;
1895		break;
1896	case AMDGPU_NPS4_PARTITION_MODE:
1897		if (adev->flags & AMD_IS_APU)
1898			adev->gmc.num_mem_partitions = 3;
1899		else
1900			adev->gmc.num_mem_partitions = 4;
1901		break;
1902	default:
1903		adev->gmc.num_mem_partitions = 1;
1904		break;
1905	}
1906
1907	size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT;
1908	size /= adev->gmc.num_mem_partitions;
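	/* Divide pages evenly among partitions; the remainder is folded into the last range below. */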
1909
1910	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
1911		mem_ranges[i].range.fpfn = start_addr;
1912		mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
1913		mem_ranges[i].range.lpfn = start_addr + size - 1;
1914		start_addr += size;
1915	}
1916
1917	/* Adjust the last range so it ends exactly at the end of VRAM */
1918	mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn =
1919		(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
1920	mem_ranges[adev->gmc.num_mem_partitions - 1].size =
1921		adev->gmc.real_vram_size -
1922		((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn
1923		 << AMDGPU_GPU_PAGE_SHIFT);
1924}
1925
1926static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
1927{
1928	bool valid;
1929
1930	adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
1931					   sizeof(struct amdgpu_mem_partition_info),
1932					   GFP_KERNEL);
1933	if (!adev->gmc.mem_partitions)
1934		return -ENOMEM;
1935
1936	/* TODO: Get the range from PSP/discovery for dGPU */
1937	if (adev->gmc.is_app_apu)
1938		gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
1939	else
1940		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
1941
1942	if (amdgpu_sriov_vf(adev))
1943		valid = true;
1944	else
1945		valid = gmc_v9_0_validate_partition_info(adev);
1946	if (!valid) {
1947		/* TODO: handle invalid case */
1948		dev_WARN(adev->dev,
1949			 "Memory ranges do not match the hardware config");
1950	}
1951
1952	return 0;
1953}
1954
1955static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
1956{
1957	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
1958	adev->gmc.vram_width = 128 * 64;
1959}
1960
1961static int gmc_v9_0_sw_init(void *handle)
1962{
1963	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
1964	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1965	unsigned long inst_mask = adev->aid_mask;
1966
1967	adev->gfxhub.funcs->init(adev);
1968
1969	adev->mmhub.funcs->init(adev);
1970
1971	spin_lock_init(&adev->gmc.invalidate_lock);
1972
1973	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
1974		gmc_v9_4_3_init_vram_info(adev);
1975	} else if (!adev->bios) {
1976		if (adev->flags & AMD_IS_APU) {
1977			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
1978			adev->gmc.vram_width = 64 * 64;
1979		} else {
1980			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
1981			adev->gmc.vram_width = 128 * 64;
1982		}
1983	} else {
1984		r = amdgpu_atomfirmware_get_vram_info(adev,
1985			&vram_width, &vram_type, &vram_vendor);
1986		if (amdgpu_sriov_vf(adev))
1987			/* For Vega10 SR-IOV, vram_width can't be read from ATOM
1988			 * (as on RAVEN), and the DF-related registers are not
1989			 * readable; hardcoding seems to be the only way to set
1990			 * the correct vram_width.
1991			 */
1991			adev->gmc.vram_width = 2048;
1992		else if (amdgpu_emu_mode != 1)
1993			adev->gmc.vram_width = vram_width;
1994
1995		if (!adev->gmc.vram_width) {
1996			int chansize, numchan;
1997
1998			/* hbm memory channel size */
1999			if (adev->flags & AMD_IS_APU)
2000				chansize = 64;
2001			else
2002				chansize = 128;
2003			if (adev->df.funcs &&
2004			    adev->df.funcs->get_hbm_channel_number) {
2005				numchan = adev->df.funcs->get_hbm_channel_number(adev);
2006				adev->gmc.vram_width = numchan * chansize;
2007			}
2008		}
2009
2010		adev->gmc.vram_type = vram_type;
2011		adev->gmc.vram_vendor = vram_vendor;
2012	}
2013	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2014	case IP_VERSION(9, 1, 0):
2015	case IP_VERSION(9, 2, 2):
2016		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2017		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2018
2019		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
2020			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2021		} else {
2022			/* vm_size is 128TB + 512GB for legacy 3-level page support */
2023			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
2024			adev->gmc.translate_further =
2025				adev->vm_manager.num_level > 1;
2026		}
2027		break;
2028	case IP_VERSION(9, 0, 1):
2029	case IP_VERSION(9, 2, 1):
2030	case IP_VERSION(9, 4, 0):
2031	case IP_VERSION(9, 3, 0):
2032	case IP_VERSION(9, 4, 2):
2033		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2034		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2035
2036		/*
2037		 * To support 4-level page tables, the VM size is set to 256TB
2038		 * (48 bits), the maximum for Vega10, with a block size of
2039		 * 512 (9 bits).
2040		 */
2041
2042		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2043		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
2044			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2045		break;
2046	case IP_VERSION(9, 4, 1):
2047		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2048		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2049		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
2050
2051		/* Keep the vm size same with Vega20 */
2052		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2053		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2054		break;
2055	case IP_VERSION(9, 4, 3):
2056		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
2057				  NUM_XCC(adev->gfx.xcc_mask));
2058
2059		inst_mask <<= AMDGPU_MMHUB0(0);
2060		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
2061
2062		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2063		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2064		break;
2065	default:
2066		break;
2067	}
2068
2069	/* This interrupt is the VMC page fault. */
2070	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
2071				&adev->gmc.vm_fault);
2072	if (r)
2073		return r;
2074
2075	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
2076		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
2077					&adev->gmc.vm_fault);
2078		if (r)
2079			return r;
2080	}
2081
2082	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
2083				&adev->gmc.vm_fault);
2084
2085	if (r)
2086		return r;
2087
2088	if (!amdgpu_sriov_vf(adev) &&
2089	    !adev->gmc.xgmi.connected_to_cpu &&
2090	    !adev->gmc.is_app_apu) {
2091		/* interrupt sent to DF. */
2092		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
2093				      &adev->gmc.ecc_irq);
2094		if (r)
2095			return r;
2096	}
2097
2098	/* Set the internal MC address mask
2099	 * This is the max address of the GPU's
2100	 * internal address space.
2101	 */
2102	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
2103
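	/* GC IP v9.4.2 and newer support 48-bit DMA addressing; older parts are limited to 44 bits. */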
2104	dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
2105					IP_VERSION(9, 4, 2) ?
2106				48 :
2107				44;
2108	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
2109	if (r) {
2110		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
2111		return r;
2112	}
2113	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
2114
2115	r = gmc_v9_0_mc_init(adev);
2116	if (r)
2117		return r;
2118
2119	amdgpu_gmc_get_vbios_allocations(adev);
2120
2121	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
2122		r = gmc_v9_0_init_mem_ranges(adev);
2123		if (r)
2124			return r;
2125	}
2126
2127	/* Memory manager */
2128	r = amdgpu_bo_init(adev);
2129	if (r)
2130		return r;
2131
2132	r = gmc_v9_0_gart_init(adev);
2133	if (r)
2134		return r;
2135
2136	/*
2137	 * number of VMs
2138	 * VMID 0 is reserved for System
2139	 * amdgpu graphics/compute will use VMIDs 1..n-1
2140	 * amdkfd will use VMIDs n..15
2141	 *
2142	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
2143	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
2144	 * for video processing.
2145	 */
2146	adev->vm_manager.first_kfd_vmid =
2147		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
2148		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
2149		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) ?
2150			3 :
2151			8;
2152
2153	amdgpu_vm_manager_init(adev);
2154
2155	gmc_v9_0_save_registers(adev);
2156
2157	r = amdgpu_gmc_ras_sw_init(adev);
2158	if (r)
2159		return r;
2160
2161	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
2162		amdgpu_gmc_sysfs_init(adev);
2163
2164	return 0;
2165}
2166
2167static int gmc_v9_0_sw_fini(void *handle)
2168{
2169	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2170
2171	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
2172		amdgpu_gmc_sysfs_fini(adev);
2173
2174	amdgpu_gmc_ras_fini(adev);
2175	amdgpu_gem_force_release(adev);
2176	amdgpu_vm_manager_fini(adev);
2177	if (!adev->gmc.real_vram_size) {
2178		dev_info(adev->dev, "Free GART table in system memory for APU\n");
2179		amdgpu_gart_table_ram_free(adev);
2180	} else {
2181		amdgpu_gart_table_vram_free(adev);
2182	}
2183	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
2184	amdgpu_bo_fini(adev);
2185
2186	adev->gmc.num_mem_partitions = 0;
2187	kfree(adev->gmc.mem_partitions);
2188
2189	return 0;
2190}
2191
2192static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
2193{
2194	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
2195	case IP_VERSION(9, 0, 0):
2196		if (amdgpu_sriov_vf(adev))
2197			break;
2198		fallthrough;
2199	case IP_VERSION(9, 4, 0):
2200		soc15_program_register_sequence(adev,
2201						golden_settings_mmhub_1_0_0,
2202						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
2203		soc15_program_register_sequence(adev,
2204						golden_settings_athub_1_0_0,
2205						ARRAY_SIZE(golden_settings_athub_1_0_0));
2206		break;
2207	case IP_VERSION(9, 1, 0):
2208	case IP_VERSION(9, 2, 0):
2209		/* TODO for renoir */
2210		soc15_program_register_sequence(adev,
2211						golden_settings_athub_1_0_0,
2212						ARRAY_SIZE(golden_settings_athub_1_0_0));
2213		break;
2214	default:
2215		break;
2216	}
2217}
2218
2219/**
2220 * gmc_v9_0_restore_registers - restore registers
2221 *
2222 * @adev: amdgpu_device pointer
2223 *
2224 * This restores the register values saved at suspend.
2225 */
2226void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2227{
2228	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
2229	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
2230		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2231		WARN_ON(adev->gmc.sdpif_register !=
2232			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2233	}
2234}
2235
2236/**
2237 * gmc_v9_0_gart_enable - gart enable
2238 *
2239 * @adev: amdgpu_device pointer
2240 */
2241static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
2242{
2243	int r;
2244
2245	if (adev->gmc.xgmi.connected_to_cpu)
2246		amdgpu_gmc_init_pdb0(adev);
2247
2248	if (adev->gart.bo == NULL) {
2249		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
2250		return -EINVAL;
2251	}
2252
2253	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
2254
2255	if (!adev->in_s0ix) {
2256		r = adev->gfxhub.funcs->gart_enable(adev);
2257		if (r)
2258			return r;
2259	}
2260
2261	r = adev->mmhub.funcs->gart_enable(adev);
2262	if (r)
2263		return r;
2264
2265	DRM_INFO("PCIE GART of %uM enabled.\n",
2266		 (unsigned int)(adev->gmc.gart_size >> 20));
2267	if (adev->gmc.pdb0_bo)
2268		DRM_INFO("PDB0 located at 0x%016llX\n",
2269				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
2270	DRM_INFO("PTB located at 0x%016llX\n",
2271			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
2272
2273	return 0;
2274}
2275
2276static int gmc_v9_0_hw_init(void *handle)
2277{
2278	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2279	bool value;
2280	int i, r;
2281
2282	adev->gmc.flush_pasid_uses_kiq = true;
2283
2284	/* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
2285	 * (type 2), which flushes both. Due to a race condition with
2286	 * concurrent memory accesses using the same TLB cache line, we still
2287	 * need a second TLB flush after this.
2288	 */
2289	adev->gmc.flush_tlb_needs_extra_type_2 =
2290		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
2291		adev->gmc.xgmi.num_physical_nodes;
2292	/*
2293	 * TODO: This workaround is badly documented and had a buggy
2294	 * implementation. We should probably verify what we do here.
2295	 */
2296	adev->gmc.flush_tlb_needs_extra_type_0 =
2297		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2298		adev->rev_id == 0;
2299
2300	/* The sequence of these two function calls matters. */
2301	gmc_v9_0_init_golden_registers(adev);
2302
2303	if (adev->mode_info.num_crtc) {
2304		/* Lockout access through VGA aperture*/
2305		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
2306		/* disable VGA render */
2307		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
2308	}
2309
2310	if (adev->mmhub.funcs->update_power_gating)
2311		adev->mmhub.funcs->update_power_gating(adev, true);
2312
2313	adev->hdp.funcs->init_registers(adev);
2314
2315	/* After HDP is initialized, flush HDP.*/
2316	adev->hdp.funcs->flush_hdp(adev, NULL);
2317
2318	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
2319		value = false;
2320	else
2321		value = true;
2322
2323	if (!amdgpu_sriov_vf(adev)) {
2324		if (!adev->in_s0ix)
2325			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2326		adev->mmhub.funcs->set_fault_enable_default(adev, value);
2327	}
2328	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2329		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
2330			continue;
2331		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
2332	}
2333
2334	if (adev->umc.funcs && adev->umc.funcs->init_registers)
2335		adev->umc.funcs->init_registers(adev);
2336
2337	r = gmc_v9_0_gart_enable(adev);
2338	if (r)
2339		return r;
2340
2341	if (amdgpu_emu_mode == 1)
2342		return amdgpu_gmc_vram_checking(adev);
2343
2344	return 0;
2345}
2346
2347/**
2348 * gmc_v9_0_gart_disable - gart disable
2349 *
2350 * @adev: amdgpu_device pointer
2351 *
2352 * This disables all VM page tables.
2353 */
2354static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
2355{
2356	if (!adev->in_s0ix)
2357		adev->gfxhub.funcs->gart_disable(adev);
2358	adev->mmhub.funcs->gart_disable(adev);
2359}
2360
2361static int gmc_v9_0_hw_fini(void *handle)
2362{
2363	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2364
2365	gmc_v9_0_gart_disable(adev);
2366
2367	if (amdgpu_sriov_vf(adev)) {
2368		/* full access mode, so don't touch any GMC register */
2369		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
2370		return 0;
2371	}
2372
2373	/*
2374	 * Pair the operations done in gmc_v9_0_hw_init and thus maintain
2375	 * a correct cached state for GMC. Otherwise, gating again on S3
2376	 * resume will fail due to a wrong cached state.
2377	 */
2378	if (adev->mmhub.funcs->update_power_gating)
2379		adev->mmhub.funcs->update_power_gating(adev, false);
2380
2381	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
2382
2383	if (adev->gmc.ecc_irq.funcs &&
2384		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
2385		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
2386
2387	return 0;
2388}
2389
2390static int gmc_v9_0_suspend(void *handle)
2391{
2392	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2393
2394	return gmc_v9_0_hw_fini(adev);
2395}
2396
2397static int gmc_v9_0_resume(void *handle)
2398{
2399	int r;
2400	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2401
2402	r = gmc_v9_0_hw_init(adev);
2403	if (r)
2404		return r;
2405
2406	amdgpu_vmid_reset_all(adev);
2407
2408	return 0;
2409}
2410
2411static bool gmc_v9_0_is_idle(void *handle)
2412{
2413	/* MC is always ready in GMC v9. */
2414	return true;
2415}
2416
2417static int gmc_v9_0_wait_for_idle(void *handle)
2418{
2419	/* There is no need to wait for MC idle in GMC v9. */
2420	return 0;
2421}
2422
2423static int gmc_v9_0_soft_reset(void *handle)
2424{
2425	/* XXX for emulation. */
2426	return 0;
2427}
2428
2429static int gmc_v9_0_set_clockgating_state(void *handle,
2430					enum amd_clockgating_state state)
2431{
2432	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2433
2434	adev->mmhub.funcs->set_clockgating(adev, state);
2435
2436	athub_v1_0_set_clockgating(adev, state);
2437
2438	return 0;
2439}
2440
2441static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
2442{
2443	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2444
2445	adev->mmhub.funcs->get_clockgating(adev, flags);
2446
2447	athub_v1_0_get_clockgating(adev, flags);
2448}
2449
2450static int gmc_v9_0_set_powergating_state(void *handle,
2451					enum amd_powergating_state state)
2452{
2453	return 0;
2454}
2455
2456const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2457	.name = "gmc_v9_0",
2458	.early_init = gmc_v9_0_early_init,
2459	.late_init = gmc_v9_0_late_init,
2460	.sw_init = gmc_v9_0_sw_init,
2461	.sw_fini = gmc_v9_0_sw_fini,
2462	.hw_init = gmc_v9_0_hw_init,
2463	.hw_fini = gmc_v9_0_hw_fini,
2464	.suspend = gmc_v9_0_suspend,
2465	.resume = gmc_v9_0_resume,
2466	.is_idle = gmc_v9_0_is_idle,
2467	.wait_for_idle = gmc_v9_0_wait_for_idle,
2468	.soft_reset = gmc_v9_0_soft_reset,
2469	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
2470	.set_powergating_state = gmc_v9_0_set_powergating_state,
2471	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
2472};
2473
2474const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
2475	.type = AMD_IP_BLOCK_TYPE_GMC,
2476	.major = 9,
2477	.minor = 0,
2478	.rev = 0,
2479	.funcs = &gmc_v9_0_ip_funcs,
2480};
v6.13.7
  79#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX                                                 2
  80
  81#define MAX_MEM_RANGES 8
  82
  83static const char * const gfxhub_client_ids[] = {
  84	"CB",
  85	"DB",
  86	"IA",
  87	"WD",
  88	"CPF",
  89	"CPC",
  90	"CPG",
  91	"RLC",
  92	"TCP",
  93	"SQC (inst)",
  94	"SQC (data)",
  95	"SQG",
  96	"PA",
  97};
  98
  99static const char *mmhub_client_ids_raven[][2] = {
 100	[0][0] = "MP1",
 101	[1][0] = "MP0",
 102	[2][0] = "VCN",
 103	[3][0] = "VCNU",
 104	[4][0] = "HDP",
 105	[5][0] = "DCE",
 106	[13][0] = "UTCL2",
 107	[19][0] = "TLS",
 108	[26][0] = "OSS",
 109	[27][0] = "SDMA0",
 110	[0][1] = "MP1",
 111	[1][1] = "MP0",
 112	[2][1] = "VCN",
 113	[3][1] = "VCNU",
 114	[4][1] = "HDP",
 115	[5][1] = "XDP",
 116	[6][1] = "DBGU0",
 117	[7][1] = "DCE",
 118	[8][1] = "DCEDWB0",
 119	[9][1] = "DCEDWB1",
 120	[26][1] = "OSS",
 121	[27][1] = "SDMA0",
 122};
 123
 124static const char *mmhub_client_ids_renoir[][2] = {
 125	[0][0] = "MP1",
 126	[1][0] = "MP0",
 127	[2][0] = "HDP",
 128	[4][0] = "DCEDMC",
 129	[5][0] = "DCEVGA",
 130	[13][0] = "UTCL2",
 131	[19][0] = "TLS",
 132	[26][0] = "OSS",
 133	[27][0] = "SDMA0",
 134	[28][0] = "VCN",
 135	[29][0] = "VCNU",
 136	[30][0] = "JPEG",
 137	[0][1] = "MP1",
 138	[1][1] = "MP0",
 139	[2][1] = "HDP",
 140	[3][1] = "XDP",
 141	[6][1] = "DBGU0",
 142	[7][1] = "DCEDMC",
 143	[8][1] = "DCEVGA",
 144	[9][1] = "DCEDWB",
 145	[26][1] = "OSS",
 146	[27][1] = "SDMA0",
 147	[28][1] = "VCN",
 148	[29][1] = "VCNU",
 149	[30][1] = "JPEG",
 150};
 151
 152static const char *mmhub_client_ids_vega10[][2] = {
 153	[0][0] = "MP0",
 154	[1][0] = "UVD",
 155	[2][0] = "UVDU",
 156	[3][0] = "HDP",
 157	[13][0] = "UTCL2",
 158	[14][0] = "OSS",
 159	[15][0] = "SDMA1",
 160	[32+0][0] = "VCE0",
 161	[32+1][0] = "VCE0U",
 162	[32+2][0] = "XDMA",
 163	[32+3][0] = "DCE",
 164	[32+4][0] = "MP1",
 165	[32+14][0] = "SDMA0",
 166	[0][1] = "MP0",
 167	[1][1] = "UVD",
 168	[2][1] = "UVDU",
 169	[3][1] = "DBGU0",
 170	[4][1] = "HDP",
 171	[5][1] = "XDP",
 172	[14][1] = "OSS",
 173	[15][1] = "SDMA0",
 174	[32+0][1] = "VCE0",
 175	[32+1][1] = "VCE0U",
 176	[32+2][1] = "XDMA",
 177	[32+3][1] = "DCE",
 178	[32+4][1] = "DCEDWB",
 179	[32+5][1] = "MP1",
 180	[32+6][1] = "DBGU1",
 181	[32+14][1] = "SDMA1",
 182};
 183
 184static const char *mmhub_client_ids_vega12[][2] = {
 185	[0][0] = "MP0",
 186	[1][0] = "VCE0",
 187	[2][0] = "VCE0U",
 188	[3][0] = "HDP",
 189	[13][0] = "UTCL2",
 190	[14][0] = "OSS",
 191	[15][0] = "SDMA1",
 192	[32+0][0] = "DCE",
 193	[32+1][0] = "XDMA",
 194	[32+2][0] = "UVD",
 195	[32+3][0] = "UVDU",
 196	[32+4][0] = "MP1",
 197	[32+15][0] = "SDMA0",
 198	[0][1] = "MP0",
 199	[1][1] = "VCE0",
 200	[2][1] = "VCE0U",
 201	[3][1] = "DBGU0",
 202	[4][1] = "HDP",
 203	[5][1] = "XDP",
 204	[14][1] = "OSS",
 205	[15][1] = "SDMA0",
 206	[32+0][1] = "DCE",
 207	[32+1][1] = "DCEDWB",
 208	[32+2][1] = "XDMA",
 209	[32+3][1] = "UVD",
 210	[32+4][1] = "UVDU",
 211	[32+5][1] = "MP1",
 212	[32+6][1] = "DBGU1",
 213	[32+15][1] = "SDMA1",
 214};
 215
 216static const char *mmhub_client_ids_vega20[][2] = {
 217	[0][0] = "XDMA",
 218	[1][0] = "DCE",
 219	[2][0] = "VCE0",
 220	[3][0] = "VCE0U",
 221	[4][0] = "UVD",
 222	[5][0] = "UVD1U",
 223	[13][0] = "OSS",
 224	[14][0] = "HDP",
 225	[15][0] = "SDMA0",
 226	[32+0][0] = "UVD",
 227	[32+1][0] = "UVDU",
 228	[32+2][0] = "MP1",
 229	[32+3][0] = "MP0",
 230	[32+12][0] = "UTCL2",
 231	[32+14][0] = "SDMA1",
 232	[0][1] = "XDMA",
 233	[1][1] = "DCE",
 234	[2][1] = "DCEDWB",
 235	[3][1] = "VCE0",
 236	[4][1] = "VCE0U",
 237	[5][1] = "UVD1",
 238	[6][1] = "UVD1U",
 239	[7][1] = "DBGU0",
 240	[8][1] = "XDP",
 241	[13][1] = "OSS",
 242	[14][1] = "HDP",
 243	[15][1] = "SDMA0",
 244	[32+0][1] = "UVD",
 245	[32+1][1] = "UVDU",
 246	[32+2][1] = "DBGU1",
 247	[32+3][1] = "MP1",
 248	[32+4][1] = "MP0",
 249	[32+14][1] = "SDMA1",
 250};
 251
 252static const char *mmhub_client_ids_arcturus[][2] = {
 253	[0][0] = "DBGU1",
 254	[1][0] = "XDP",
 255	[2][0] = "MP1",
 256	[14][0] = "HDP",
 257	[171][0] = "JPEG",
 258	[172][0] = "VCN",
 259	[173][0] = "VCNU",
 260	[203][0] = "JPEG1",
 261	[204][0] = "VCN1",
 262	[205][0] = "VCN1U",
 263	[256][0] = "SDMA0",
 264	[257][0] = "SDMA1",
 265	[258][0] = "SDMA2",
 266	[259][0] = "SDMA3",
 267	[260][0] = "SDMA4",
 268	[261][0] = "SDMA5",
 269	[262][0] = "SDMA6",
 270	[263][0] = "SDMA7",
 271	[384][0] = "OSS",
 272	[0][1] = "DBGU1",
 273	[1][1] = "XDP",
 274	[2][1] = "MP1",
 275	[14][1] = "HDP",
 276	[171][1] = "JPEG",
 277	[172][1] = "VCN",
 278	[173][1] = "VCNU",
 279	[203][1] = "JPEG1",
 280	[204][1] = "VCN1",
 281	[205][1] = "VCN1U",
 282	[256][1] = "SDMA0",
 283	[257][1] = "SDMA1",
 284	[258][1] = "SDMA2",
 285	[259][1] = "SDMA3",
 286	[260][1] = "SDMA4",
 287	[261][1] = "SDMA5",
 288	[262][1] = "SDMA6",
 289	[263][1] = "SDMA7",
 290	[384][1] = "OSS",
 291};
 292
 293static const char *mmhub_client_ids_aldebaran[][2] = {
 294	[2][0] = "MP1",
 295	[3][0] = "MP0",
 296	[32+1][0] = "DBGU_IO0",
 297	[32+2][0] = "DBGU_IO2",
 298	[32+4][0] = "MPIO",
 299	[96+11][0] = "JPEG0",
 300	[96+12][0] = "VCN0",
 301	[96+13][0] = "VCNU0",
 302	[128+11][0] = "JPEG1",
 303	[128+12][0] = "VCN1",
 304	[128+13][0] = "VCNU1",
 305	[160+1][0] = "XDP",
 306	[160+14][0] = "HDP",
 307	[256+0][0] = "SDMA0",
 308	[256+1][0] = "SDMA1",
 309	[256+2][0] = "SDMA2",
 310	[256+3][0] = "SDMA3",
 311	[256+4][0] = "SDMA4",
 312	[384+0][0] = "OSS",
 313	[2][1] = "MP1",
 314	[3][1] = "MP0",
 315	[32+1][1] = "DBGU_IO0",
 316	[32+2][1] = "DBGU_IO2",
 317	[32+4][1] = "MPIO",
 318	[96+11][1] = "JPEG0",
 319	[96+12][1] = "VCN0",
 320	[96+13][1] = "VCNU0",
 321	[128+11][1] = "JPEG1",
 322	[128+12][1] = "VCN1",
 323	[128+13][1] = "VCNU1",
 324	[160+1][1] = "XDP",
 325	[160+14][1] = "HDP",
 326	[256+0][1] = "SDMA0",
 327	[256+1][1] = "SDMA1",
 328	[256+2][1] = "SDMA2",
 329	[256+3][1] = "SDMA3",
 330	[256+4][1] = "SDMA4",
 331	[384+0][1] = "OSS",
 332};
 333
 334static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
 335	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
 336	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
 337};
 338
 339static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
 340	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
 341	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
 342};
 343
 344static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
 345	(0x000143c0 + 0x00000000),
 346	(0x000143c0 + 0x00000800),
 347	(0x000143c0 + 0x00001000),
 348	(0x000143c0 + 0x00001800),
 349	(0x000543c0 + 0x00000000),
 350	(0x000543c0 + 0x00000800),
 351	(0x000543c0 + 0x00001000),
 352	(0x000543c0 + 0x00001800),
 353	(0x000943c0 + 0x00000000),
 354	(0x000943c0 + 0x00000800),
 355	(0x000943c0 + 0x00001000),
 356	(0x000943c0 + 0x00001800),
 357	(0x000d43c0 + 0x00000000),
 358	(0x000d43c0 + 0x00000800),
 359	(0x000d43c0 + 0x00001000),
 360	(0x000d43c0 + 0x00001800),
 361	(0x001143c0 + 0x00000000),
 362	(0x001143c0 + 0x00000800),
 363	(0x001143c0 + 0x00001000),
 364	(0x001143c0 + 0x00001800),
 365	(0x001543c0 + 0x00000000),
 366	(0x001543c0 + 0x00000800),
 367	(0x001543c0 + 0x00001000),
 368	(0x001543c0 + 0x00001800),
 369	(0x001943c0 + 0x00000000),
 370	(0x001943c0 + 0x00000800),
 371	(0x001943c0 + 0x00001000),
 372	(0x001943c0 + 0x00001800),
 373	(0x001d43c0 + 0x00000000),
 374	(0x001d43c0 + 0x00000800),
 375	(0x001d43c0 + 0x00001000),
 376	(0x001d43c0 + 0x00001800),
 377};
 378
 379static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
 380	(0x000143e0 + 0x00000000),
 381	(0x000143e0 + 0x00000800),
 382	(0x000143e0 + 0x00001000),
 383	(0x000143e0 + 0x00001800),
 384	(0x000543e0 + 0x00000000),
 385	(0x000543e0 + 0x00000800),
 386	(0x000543e0 + 0x00001000),
 387	(0x000543e0 + 0x00001800),
 388	(0x000943e0 + 0x00000000),
 389	(0x000943e0 + 0x00000800),
 390	(0x000943e0 + 0x00001000),
 391	(0x000943e0 + 0x00001800),
 392	(0x000d43e0 + 0x00000000),
 393	(0x000d43e0 + 0x00000800),
 394	(0x000d43e0 + 0x00001000),
 395	(0x000d43e0 + 0x00001800),
 396	(0x001143e0 + 0x00000000),
 397	(0x001143e0 + 0x00000800),
 398	(0x001143e0 + 0x00001000),
 399	(0x001143e0 + 0x00001800),
 400	(0x001543e0 + 0x00000000),
 401	(0x001543e0 + 0x00000800),
 402	(0x001543e0 + 0x00001000),
 403	(0x001543e0 + 0x00001800),
 404	(0x001943e0 + 0x00000000),
 405	(0x001943e0 + 0x00000800),
 406	(0x001943e0 + 0x00001000),
 407	(0x001943e0 + 0x00001800),
 408	(0x001d43e0 + 0x00000000),
 409	(0x001d43e0 + 0x00000800),
 410	(0x001d43e0 + 0x00001000),
 411	(0x001d43e0 + 0x00001800),
 412};
 413
 414static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
 415		struct amdgpu_irq_src *src,
 416		unsigned int type,
 417		enum amdgpu_interrupt_state state)
 418{
 419	u32 bits, i, tmp, reg;
 420
 421	/* Devices newer than VEGA10/12 have these programming
 422	 * sequences performed by the PSP bootloader.
 423	 */
 424	if (adev->asic_type >= CHIP_VEGA20)
 425		return 0;
 426
 427	bits = 0x7f;
 428
 429	switch (state) {
 430	case AMDGPU_IRQ_STATE_DISABLE:
 431		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
 432			reg = ecc_umc_mcumc_ctrl_addrs[i];
 433			tmp = RREG32(reg);
 434			tmp &= ~bits;
 435			WREG32(reg, tmp);
 436		}
 437		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
 438			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
 439			tmp = RREG32(reg);
 440			tmp &= ~bits;
 441			WREG32(reg, tmp);
 442		}
 443		break;
 444	case AMDGPU_IRQ_STATE_ENABLE:
 445		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
 446			reg = ecc_umc_mcumc_ctrl_addrs[i];
 447			tmp = RREG32(reg);
 448			tmp |= bits;
 449			WREG32(reg, tmp);
 450		}
 451		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
 452			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
 453			tmp = RREG32(reg);
 454			tmp |= bits;
 455			WREG32(reg, tmp);
 456		}
 457		break;
 458	default:
 459		break;
 460	}
 461
 462	return 0;
 463}
 464
 465static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 466					struct amdgpu_irq_src *src,
 467					unsigned int type,
 468					enum amdgpu_interrupt_state state)
 469{
 470	struct amdgpu_vmhub *hub;
 471	u32 tmp, reg, bits, i, j;
 472
 473	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 474		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 475		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 476		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 477		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 478		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 479		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 480
 481	switch (state) {
 482	case AMDGPU_IRQ_STATE_DISABLE:
 483		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
 484			hub = &adev->vmhub[j];
 485			for (i = 0; i < 16; i++) {
 486				reg = hub->vm_context0_cntl + i;
 487
 488				/* This works because this interrupt is only
 489				 * enabled at init/resume and disabled in
 490				 * fini/suspend, so the overall state doesn't
 491				 * change over the course of suspend/resume.
 492				 */
 493				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
 494					continue;
 495
 496				if (j >= AMDGPU_MMHUB0(0))
 497					tmp = RREG32_SOC15_IP(MMHUB, reg);
 498				else
 499					tmp = RREG32_XCC(reg, j);
 500
 501				tmp &= ~bits;
 502
 503				if (j >= AMDGPU_MMHUB0(0))
 504					WREG32_SOC15_IP(MMHUB, reg, tmp);
 505				else
 506					WREG32_XCC(reg, tmp, j);
 507			}
 508		}
 509		break;
 510	case AMDGPU_IRQ_STATE_ENABLE:
 511		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
 512			hub = &adev->vmhub[j];
 513			for (i = 0; i < 16; i++) {
 514				reg = hub->vm_context0_cntl + i;
 515
 516				/* This works because this interrupt is only
 517				 * enabled at init/resume and disabled in
 518				 * fini/suspend, so the overall state doesn't
 519				 * change over the course of suspend/resume.
 520				 */
 521				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
 522					continue;
 523
 524				if (j >= AMDGPU_MMHUB0(0))
 525					tmp = RREG32_SOC15_IP(MMHUB, reg);
 526				else
 527					tmp = RREG32_XCC(reg, j);
 528
 529				tmp |= bits;
 530
 531				if (j >= AMDGPU_MMHUB0(0))
 532					WREG32_SOC15_IP(MMHUB, reg, tmp);
 533				else
 534					WREG32_XCC(reg, tmp, j);
 535			}
 536		}
 537		break;
 538	default:
 539		break;
 540	}
 541
 542	return 0;
 543}
 544
 545static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 546				      struct amdgpu_irq_src *source,
 547				      struct amdgpu_iv_entry *entry)
 548{
 549	bool retry_fault = !!(entry->src_data[1] & 0x80);
 550	bool write_fault = !!(entry->src_data[1] & 0x20);
 551	uint32_t status = 0, cid = 0, rw = 0, fed = 0;
 552	struct amdgpu_task_info *task_info;
 553	struct amdgpu_vmhub *hub;
 554	const char *mmhub_cid;
 555	const char *hub_name;
 556	unsigned int vmhub;
 557	u64 addr;
 558	uint32_t cam_index = 0;
 559	int ret, xcc_id = 0;
 560	uint32_t node_id;
 561
 562	node_id = entry->node_id;
 563
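	/* Reassemble the faulting address: bits 43:12 from src_data[0], bits 47:44 from src_data[1]. */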
 564	addr = (u64)entry->src_data[0] << 12;
 565	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 566
 567	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
 568		hub_name = "mmhub0";
 569		vmhub = AMDGPU_MMHUB0(node_id / 4);
 570	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
 571		hub_name = "mmhub1";
 572		vmhub = AMDGPU_MMHUB1(0);
 573	} else {
 574		hub_name = "gfxhub0";
 575		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
 576			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
 577				node_id);
 578			if (xcc_id < 0)
 579				xcc_id = 0;
 580		}
 581		vmhub = xcc_id;
 582	}
 583	hub = &adev->vmhub[vmhub];
 584
 585	if (retry_fault) {
 586		if (adev->irq.retry_cam_enabled) {
 587			/* Delegate it to a different ring if the hardware hasn't
 588			 * already done it.
 589			 */
 590			if (entry->ih == &adev->irq.ih) {
 591				amdgpu_irq_delegate(adev, entry, 8);
 592				return 1;
 593			}
 594
 595			cam_index = entry->src_data[2] & 0x3ff;
 596
 597			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
 598						     addr, entry->timestamp, write_fault);
 599			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
 600			if (ret)
 601				return 1;
 602		} else {
 603			/* Process it only if it's the first fault for this address */
 604			if (entry->ih != &adev->irq.ih_soft &&
 605			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
 606					     entry->timestamp))
 607				return 1;
 608
 609			/* Delegate it to a different ring if the hardware hasn't
 610			 * already done it.
 611			 */
 612			if (entry->ih == &adev->irq.ih) {
 613				amdgpu_irq_delegate(adev, entry, 8);
 614				return 1;
 615			}
 616
 617			/* Try to handle the recoverable page faults by filling page
 618			 * tables
 619			 */
 620			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
 621						   addr, entry->timestamp, write_fault))
 622				return 1;
 623		}
 624	}
 625
 626	if (!printk_ratelimit())
 627		return 0;
 628
 629	dev_err(adev->dev,
 630		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
 631		retry_fault ? "retry" : "no-retry",
 632		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
 633
 634	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
 635	if (task_info) {
 636		dev_err(adev->dev,
 637			" for process %s pid %d thread %s pid %d)\n",
 638			task_info->process_name, task_info->tgid,
 639			task_info->task_name, task_info->pid);
 640		amdgpu_vm_put_task_info(task_info);
 641	}
 642
 643	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
 644		addr, entry->client_id,
 645		soc15_ih_clientid_name[entry->client_id]);
 646
 647	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
 648	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
 649		dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",
 650			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
 651			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
 652
 653	if (amdgpu_sriov_vf(adev))
 654		return 0;
 655
 656	/*
 657	 * Issue a dummy read to wait for the status register to
 658	 * be updated to avoid reading an incorrect value due to
 659	 * the new fast GRBM interface.
 660	 */
 661	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
 662	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
 663		RREG32(hub->vm_l2_pro_fault_status);
 664
 665	status = RREG32(hub->vm_l2_pro_fault_status);
 666	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
 667	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
 668	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
 669
 670	/* For FED errors, KFD will handle it; return directly */
 671	if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
 672	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
 673		return 0;
 674
 675	/* Only print L2 fault status if the status register could be read and
 676	 * contains useful information
 677	 */
 678	if (!status)
 679		return 0;
 680
 681	if (!amdgpu_sriov_vf(adev))
 682		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
 683
 684	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
 685
 686	dev_err(adev->dev,
 687		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
 688		status);
 689	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
 690		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
 691			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
 692			gfxhub_client_ids[cid],
 693			cid);
 694	} else {
 695		switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 696		case IP_VERSION(9, 0, 0):
 697			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
 698			break;
 699		case IP_VERSION(9, 3, 0):
 700			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
 701			break;
 702		case IP_VERSION(9, 4, 0):
 703			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
 704			break;
 705		case IP_VERSION(9, 4, 1):
 706			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
 707			break;
 708		case IP_VERSION(9, 1, 0):
 709		case IP_VERSION(9, 2, 0):
 710			mmhub_cid = mmhub_client_ids_raven[cid][rw];
 711			break;
 712		case IP_VERSION(1, 5, 0):
 713		case IP_VERSION(2, 4, 0):
 714			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
 715			break;
 716		case IP_VERSION(1, 8, 0):
 717		case IP_VERSION(9, 4, 2):
 718			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
 719			break;
 720		default:
 721			mmhub_cid = NULL;
 722			break;
 723		}
 724		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
 725			mmhub_cid ? mmhub_cid : "unknown", cid);
 726	}
 727	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
 728		REG_GET_FIELD(status,
 729		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
 730	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
 731		REG_GET_FIELD(status,
 732		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
 733	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
 734		REG_GET_FIELD(status,
 735		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
 736	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
 737		REG_GET_FIELD(status,
 738		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
 739	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
 740	return 0;
 741}
 742
 743static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
 744	.set = gmc_v9_0_vm_fault_interrupt_state,
 745	.process = gmc_v9_0_process_interrupt,
 746};
 747
 748
 749static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
 750	.set = gmc_v9_0_ecc_interrupt_state,
 751	.process = amdgpu_umc_process_ecc_irq,
 752};
 753
 754static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 755{
 756	adev->gmc.vm_fault.num_types = 1;
 757	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 758
 759	if (!amdgpu_sriov_vf(adev) &&
 760	    !adev->gmc.xgmi.connected_to_cpu &&
 761	    !adev->gmc.is_app_apu) {
 762		adev->gmc.ecc_irq.num_types = 1;
 763		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
 764	}
 765}
 766
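/*
 * Build a VM_INVALIDATE_ENG0_REQ value that invalidates all L2 PTE/PDE
 * levels and the L1 PTEs for the given VMID with the requested flush type.
 */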
 767static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 768					uint32_t flush_type)
 769{
 770	u32 req = 0;
 771
 772	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
 773			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
 774	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
 775	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 776	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 777	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
 778	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
 779	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
 780	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
 781			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
 782
 783	return req;
 784}
 785
 786/**
 787 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidate semaphore
 788 *
 789 * @adev: amdgpu_device pointer
 790 * @vmhub: vmhub type
 791 *
 792 */
 793static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
 794				       uint32_t vmhub)
 795{
 796	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
 797	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
 798	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
 799		return false;
 800
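	/* Only the MMHUBs need the semaphore, only on bare metal, and not on Picasso parts that are not Raven2-based. */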
 801	return ((vmhub == AMDGPU_MMHUB0(0) ||
 802		 vmhub == AMDGPU_MMHUB1(0)) &&
 803		(!amdgpu_sriov_vf(adev)) &&
 804		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
 805		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
 806}
 807
 808static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
 809					uint8_t vmid, uint16_t *p_pasid)
 810{
 811	uint32_t value;
 812
 813	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
 814		     + vmid);
 815	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 816
 817	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 818}
 819
 820/*
 821 * GART
 822 * VMID 0 is the physical GPU addresses as used by the kernel.
 823 * VMIDs 1-15 are used for userspace clients and are handled
 824 * by the amdgpu vm/hsa code.
 825 */
 826
 827/**
 828 * gmc_v9_0_flush_gpu_tlb - flush the TLB with a certain flush type
 829 *
 830 * @adev: amdgpu_device pointer
 831 * @vmid: vm instance to flush
 832 * @vmhub: which hub to flush
 833 * @flush_type: the flush type
 834 *
 835 * Flush the TLB for the requested page table using the given flush type.
 836 */
 837static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 838					uint32_t vmhub, uint32_t flush_type)
 839{
 840	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
 841	u32 j, inv_req, tmp, sem, req, ack, inst;
 842	const unsigned int eng = 17;
 843	struct amdgpu_vmhub *hub;
 844
 845	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
 846
 847	hub = &adev->vmhub[vmhub];
 848	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 849	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
 850	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 851	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 852
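	/* MMHUB invalidations go through KIQ instance 0; for GFXHUB, the hub index doubles as the KIQ instance. */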
 853	if (vmhub >= AMDGPU_MMHUB0(0))
 854		inst = 0;
 855	else
 856		inst = vmhub;
 857
 858	/* This is necessary for SRIOV as well as for GFXOFF to function
 859	 * properly under bare metal
 860	 */
 861	if (adev->gfx.kiq[inst].ring.sched.ready &&
 862	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 863		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 864		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 865
 866		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
 867						 1 << vmid, inst);
 868		return;
 869	}
 870
 871	/* This path is needed before KIQ/MES/GFXOFF are set up */
 872	spin_lock(&adev->gmc.invalidate_lock);
 873
 874	/*
 875	 * The GPU may lose the gpuvm invalidate acknowledge state across a
 876	 * power-gating cycle, so acquire a semaphore before invalidation and
 877	 * release it afterwards to avoid entering the power-gated state
 878	 * while an invalidation is in flight.
 879	 */
 880
 881	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
 882	if (use_semaphore) {
 883		for (j = 0; j < adev->usec_timeout; j++) {
 884			/* a read return value of 1 means semaphore acquire */
 885			if (vmhub >= AMDGPU_MMHUB0(0))
 886				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, GET_INST(GC, inst));
 887			else
 888				tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, GET_INST(GC, inst));
 889			if (tmp & 0x1)
 890				break;
 891			udelay(1);
 892		}
 893
 894		if (j >= adev->usec_timeout)
 895			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 896	}
 897
 898	if (vmhub >= AMDGPU_MMHUB0(0))
 899		WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, GET_INST(GC, inst));
 900	else
 901		WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, GET_INST(GC, inst));
 902
 903	/*
 904	 * Issue a dummy read to wait for the ACK register to
 905	 * be cleared to avoid a false ACK due to the new fast
 906	 * GRBM interface.
 907	 */
 908	if ((vmhub == AMDGPU_GFXHUB(0)) &&
 909	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
 910		RREG32_NO_KIQ(req);
 911
 912	for (j = 0; j < adev->usec_timeout; j++) {
 913		if (vmhub >= AMDGPU_MMHUB0(0))
 914			tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, GET_INST(GC, inst));
 915		else
 916			tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, GET_INST(GC, inst));
 917		if (tmp & (1 << vmid))
 918			break;
 919		udelay(1);
 920	}
 921
 922	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
 923	if (use_semaphore) {
 924		/*
 925		 * Add semaphore release after invalidation;
 926		 * writing 0 releases the semaphore.
 927		 */
 928		if (vmhub >= AMDGPU_MMHUB0(0))
 929			WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, GET_INST(GC, inst));
 930		else
 931			WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, GET_INST(GC, inst));
 932	}
 933
 934	spin_unlock(&adev->gmc.invalidate_lock);
 935
 936	if (j < adev->usec_timeout)
 937		return;
 938
 939	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 940}
 941
 942/**
 943 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 944 *
 945 * @adev: amdgpu_device pointer
 946 * @pasid: pasid to be flushed
 947 * @flush_type: the flush type
 948 * @all_hub: flush all hubs
 949 * @inst: which KIQ instance to use for the invalidation
 950 *
 951 * Flush the TLB for the requested pasid.
 952 */
 953static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 954					 uint16_t pasid, uint32_t flush_type,
 955					 bool all_hub, uint32_t inst)
 956{
 957	uint16_t queried;
 958	int i, vmid;
 959
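	/* Walk VMIDs 1..15 and flush every VMID whose ATC mapping matches the PASID. */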
 960	for (vmid = 1; vmid < 16; vmid++) {
 961		bool valid;
 962
 963		valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
 964								 &queried);
 965		if (!valid || queried != pasid)
 966			continue;
 967
 968		if (all_hub) {
 969			for_each_set_bit(i, adev->vmhubs_mask,
 970					 AMDGPU_MAX_VMHUBS)
 971				gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
 972						       flush_type);
 973		} else {
 974			gmc_v9_0_flush_gpu_tlb(adev, vmid,
 975					       AMDGPU_GFXHUB(0),
 976					       flush_type);
 977		}
 978	}
 979}
 980
 981static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 982					    unsigned int vmid, uint64_t pd_addr)
 983{
 984	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
 985	struct amdgpu_device *adev = ring->adev;
 986	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
 987	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
 988	unsigned int eng = ring->vm_inv_eng;
 989
 990	/*
 991	 * The GPU may lose the gpuvm invalidate acknowledge state across a
 992	 * power-gating cycle, so acquire a semaphore before invalidation and
 993	 * release it afterwards to avoid entering the power-gated state
 994	 * while an invalidation is in flight.
 995	 */
 996
 997	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
 998	if (use_semaphore)
 999		/* a read return value of 1 means semaphore acquire */
1000		amdgpu_ring_emit_reg_wait(ring,
1001					  hub->vm_inv_eng0_sem +
1002					  hub->eng_distance * eng, 0x1, 0x1);
1003
1004	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
1005			      (hub->ctx_addr_distance * vmid),
1006			      lower_32_bits(pd_addr));
1007
1008	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
1009			      (hub->ctx_addr_distance * vmid),
1010			      upper_32_bits(pd_addr));
1011
1012	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
1013					    hub->eng_distance * eng,
1014					    hub->vm_inv_eng0_ack +
1015					    hub->eng_distance * eng,
1016					    req, 1 << vmid);
1017
1018	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
1019	if (use_semaphore)
1020		/*
1021		 * Add semaphore release after invalidation;
1022		 * writing 0 releases the semaphore.
1023		 */
1024		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
1025				      hub->eng_distance * eng, 0);
1026
1027	return pd_addr;
1028}
1029
1030static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
1031					unsigned int pasid)
1032{
1033	struct amdgpu_device *adev = ring->adev;
1034	uint32_t reg;
1035
1036	/* Do nothing because there's no lut register for mmhub1. */
1037	if (ring->vm_hub == AMDGPU_MMHUB1(0))
1038		return;
1039
1040	if (ring->vm_hub == AMDGPU_GFXHUB(0))
1041		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1042	else
1043		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1044
1045	amdgpu_ring_emit_wreg(ring, reg, pasid);
1046}
1047
1048/*
1049 * PTE format on VEGA 10:
1050 * 63:59 reserved
1051 * 58:57 mtype
1052 * 56 F
1053 * 55 L
1054 * 54 P
1055 * 53 SW
1056 * 52 T
1057 * 50:48 reserved
1058 * 47:12 4k physical page base address
1059 * 11:7 fragment
1060 * 6 write
1061 * 5 read
1062 * 4 exe
1063 * 3 Z
1064 * 2 snooped
1065 * 1 system
1066 * 0 valid
1067 *
1068 * PDE format on VEGA 10:
1069 * 63:59 block fragment size
1070 * 58:55 reserved
1071 * 54 P
1072 * 53:48 reserved
1073 * 47:6 physical base address of PD or PTE
1074 * 5:3 reserved
1075 * 2 C
1076 * 1 system
1077 * 0 valid
1078 */
1079
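/* Translate AMDGPU_VM_MTYPE_* mapping flags into VEGA10 PTE MTYPE bits;
 * unknown flags fall back to MTYPE_NC.
 */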
1080static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1081
1082{
1083	switch (flags) {
1084	case AMDGPU_VM_MTYPE_DEFAULT:
1085		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1086	case AMDGPU_VM_MTYPE_NC:
1087		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1088	case AMDGPU_VM_MTYPE_WC:
1089		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
1090	case AMDGPU_VM_MTYPE_RW:
1091		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
1092	case AMDGPU_VM_MTYPE_CC:
1093		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
1094	case AMDGPU_VM_MTYPE_UC:
1095		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
1096	default:
1097		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1098	}
1099}
1100
1101static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
1102				uint64_t *addr, uint64_t *flags)
1103{
1104	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
1105		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
1106	BUG_ON(*addr & 0xFFFF00000000003FULL);
1107
1108	if (!adev->gmc.translate_further)
1109		return;
1110
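	/* With translate_further, PDB1 entries carry a 2MB block fragment size and PDB0 entries can map 2MB pages directly. */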
1111	if (level == AMDGPU_VM_PDB1) {
1112		/* Set the block fragment size */
1113		if (!(*flags & AMDGPU_PDE_PTE))
1114			*flags |= AMDGPU_PDE_BFS(0x9);
1115
1116	} else if (level == AMDGPU_VM_PDB0) {
1117		if (*flags & AMDGPU_PDE_PTE) {
1118			*flags &= ~AMDGPU_PDE_PTE;
1119			if (!(*flags & AMDGPU_PTE_VALID))
1120				*addr |= 1 << PAGE_SHIFT;
1121		} else {
1122			*flags |= AMDGPU_PTE_TF;
1123		}
1124	}
1125}
1126
1127static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
1128					 struct amdgpu_bo *bo,
1129					 struct amdgpu_bo_va_mapping *mapping,
1130					 uint64_t *flags)
1131{
1132	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1133	bool is_vram = bo->tbo.resource &&
1134		bo->tbo.resource->mem_type == TTM_PL_VRAM;
1135	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
1136				     AMDGPU_GEM_CREATE_EXT_COHERENT);
1137	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
1138	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
1139	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
1140	unsigned int mtype_local, mtype;
1141	bool snoop = false;
1142	bool is_local;
1143
1144	dma_resv_assert_held(bo->tbo.base.resv);
1145
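	/* Rough MTYPE semantics as used below (assumption; the hardware
	 * docs are authoritative): UC = uncached, NC = non-coherent cached,
	 * CC = CPU-coherent cached, RW = locally cached read/write.
	 */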
1146	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1147	case IP_VERSION(9, 4, 1):
1148	case IP_VERSION(9, 4, 2):
1149		if (is_vram) {
1150			if (bo_adev == adev) {
1151				if (uncached)
1152					mtype = MTYPE_UC;
1153				else if (coherent)
1154					mtype = MTYPE_CC;
1155				else
1156					mtype = MTYPE_RW;
1157				/* FIXME: is this still needed? Or does
1158				 * amdgpu_ttm_tt_pde_flags already handle this?
1159				 */
1160				if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
1161					     IP_VERSION(9, 4, 2) ||
1162				     amdgpu_ip_version(adev, GC_HWIP, 0) ==
1163					     IP_VERSION(9, 4, 3)) &&
1164				    adev->gmc.xgmi.connected_to_cpu)
1165					snoop = true;
1166			} else {
1167				if (uncached || coherent)
1168					mtype = MTYPE_UC;
1169				else
1170					mtype = MTYPE_NC;
1171				if (mapping->bo_va->is_xgmi)
1172					snoop = true;
1173			}
1174		} else {
1175			if (uncached || coherent)
1176				mtype = MTYPE_UC;
1177			else
1178				mtype = MTYPE_NC;
1179			/* FIXME: is this still needed? Or does
1180			 * amdgpu_ttm_tt_pde_flags already handle this?
1181			 */
1182			snoop = true;
1183		}
1184		break;
1185	case IP_VERSION(9, 4, 3):
1186	case IP_VERSION(9, 4, 4):
1187		/* Only local VRAM BOs or system memory on non-NUMA APUs
1188		 * can be assumed to be local in their entirety. Choose
1189		 * MTYPE_NC as safe fallback for all system memory BOs on
1190		 * NUMA systems. Their MTYPE can be overridden per-page in
1191		 * gmc_v9_0_override_vm_pte_flags.
1192		 */
1193		mtype_local = MTYPE_RW;
1194		if (amdgpu_mtype_local == 1) {
1195			DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
1196			mtype_local = MTYPE_NC;
1197		} else if (amdgpu_mtype_local == 2) {
1198			DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
1199			mtype_local = MTYPE_CC;
1200		} else {
1201			DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
1202		}
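		/* "Local" means system memory on a single-NUMA-node APU, or
		 * VRAM that belongs to this device and to the VM's own memory
		 * partition.
		 */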
1203		is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
1204			    num_possible_nodes() <= 1) ||
1205			   (is_vram && adev == bo_adev &&
1206			    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
1207		snoop = true;
1208		if (uncached) {
1209			mtype = MTYPE_UC;
1210		} else if (ext_coherent) {
1211			if (adev->rev_id)
1212				mtype = is_local ? MTYPE_CC : MTYPE_UC;
1213			else
1214				mtype = MTYPE_UC;
1215		} else if (adev->flags & AMD_IS_APU) {
1216			mtype = is_local ? mtype_local : MTYPE_NC;
1217		} else {
1218			/* dGPU */
1219			if (is_local)
1220				mtype = mtype_local;
1221			else if (is_vram)
1222				mtype = MTYPE_NC;
1223			else
1224				mtype = MTYPE_UC;
1225		}
1226
1227		break;
1228	default:
1229		if (uncached || coherent)
1230			mtype = MTYPE_UC;
1231		else
1232			mtype = MTYPE_NC;
1233
1234		/* FIXME: is this still needed? Or does
1235		 * amdgpu_ttm_tt_pde_flags already handle this?
1236		 */
1237		if (!is_vram)
1238			snoop = true;
1239	}
1240
1241	if (mtype != MTYPE_NC)
1242		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype);
1243
1244	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1245}
1246
1247static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1248				struct amdgpu_bo_va_mapping *mapping,
1249				uint64_t *flags)
1250{
1251	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
1252
1253	*flags &= ~AMDGPU_PTE_EXECUTABLE;
1254	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1255
1256	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1257	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1258
1259	if (mapping->flags & AMDGPU_PTE_PRT) {
1260		*flags |= AMDGPU_PTE_PRT;
1261		*flags &= ~AMDGPU_PTE_VALID;
1262	}
1263
1264	if ((*flags & AMDGPU_PTE_VALID) && bo)
1265		gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
1266}
1267
1268static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
1269					   struct amdgpu_vm *vm,
1270					   uint64_t addr, uint64_t *flags)
1271{
1272	int local_node, nid;
1273
1274	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
1275	 * memory can use more efficient MTYPEs.
1276	 */
1277	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1278	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4))
1279		return;
1280
1281	/* Only direct-mapped memory allows us to determine the NUMA node from
1282	 * the DMA address.
1283	 */
1284	if (!adev->ram_is_direct_mapped) {
1285		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
1286		return;
1287	}
1288
1289	/* MTYPE_NC is the default and can be overridden.
1290	 * MTYPE_UC will be present if the memory is extended-coherent
1291	 * and can also be overridden.
1292	 */
1293	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1294	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC) &&
1295	    (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1296	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC)) {
1297		dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
1298		return;
1299	}
1300
1301	/* FIXME: Only supported on native mode for now. For carve-out, the
1302	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
1303	 * memory partitions are not associated with different NUMA nodes.
1304	 */
1305	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
1306		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
1307	} else {
1308		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
1309		return;
1310	}
1311
1312	/* Only handle real RAM. Mappings of PCIe resources don't have struct
1313	 * page or NUMA nodes.
1314	 */
1315	if (!page_is_ram(addr >> PAGE_SHIFT)) {
1316		dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
1317		return;
1318	}
1319	nid = pfn_to_nid(addr >> PAGE_SHIFT);
1320	dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
1321			    vm->mem_id, local_node, nid);
1322	if (nid == local_node) {
1323		uint64_t old_flags = *flags;
1324		if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
1325			AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC)) {
1326			unsigned int mtype_local = MTYPE_RW;
1327
1328			if (amdgpu_mtype_local == 1)
1329				mtype_local = MTYPE_NC;
1330			else if (amdgpu_mtype_local == 2)
1331				mtype_local = MTYPE_CC;
1332
1333			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
1334		} else if (adev->rev_id) {
1335			/* MTYPE_UC case */
1336			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
1337		}
1338
1339		dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
1340				    old_flags, *flags);
1341	}
1342}
1343
1344static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1345{
1346	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1347	unsigned int size;
1348
1349	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1350
1351	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1352		size = AMDGPU_VBIOS_VGA_ALLOCATION;
1353	} else {
1354		u32 viewport;
1355
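		/* Estimate the reserved framebuffer size as viewport width *
		 * height * 4 bytes per pixel (32bpp assumed).
		 */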
1356		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1357		case IP_VERSION(1, 0, 0):
1358		case IP_VERSION(1, 0, 1):
1359			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1360			size = (REG_GET_FIELD(viewport,
1361					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1362				REG_GET_FIELD(viewport,
1363					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1364				4);
1365			break;
1366		case IP_VERSION(2, 1, 0):
1367			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1368			size = (REG_GET_FIELD(viewport,
1369					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1370				REG_GET_FIELD(viewport,
1371					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1372				4);
1373			break;
1374		default:
1375			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1376			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1377				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1378				4);
1379			break;
1380		}
1381	}
1382
1383	return size;
1384}
1385
1386static enum amdgpu_memory_partition
1387gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
1388{
1389	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
1390
1391	if (adev->nbio.funcs->get_memory_partition_mode)
1392		mode = adev->nbio.funcs->get_memory_partition_mode(adev,
1393								   supp_modes);
1394
1395	return mode;
1396}
1397
1398static enum amdgpu_memory_partition
1399gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
1400{
1401	switch (adev->gmc.num_mem_partitions) {
1402	case 0:
1403		return UNKNOWN_MEMORY_PARTITION_MODE;
1404	case 1:
1405		return AMDGPU_NPS1_PARTITION_MODE;
1406	case 2:
1407		return AMDGPU_NPS2_PARTITION_MODE;
1408	case 4:
1409		return AMDGPU_NPS4_PARTITION_MODE;
1410	default:
1411		return AMDGPU_NPS1_PARTITION_MODE;
1412	}
1415}
1416
1417static enum amdgpu_memory_partition
1418gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
1419{
1420	if (amdgpu_sriov_vf(adev))
1421		return gmc_v9_0_query_vf_memory_partition(adev);
1422
1423	return gmc_v9_0_get_memory_partition(adev, NULL);
1424}
1425
1426static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
1427{
1428	if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
1429	    adev->nbio.funcs->is_nps_switch_requested(adev)) {
1430		adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS;
1431		return true;
1432	}
1433
1434	return false;
1435}
1436
1437static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1438	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1439	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
1440	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1441	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
1442	.map_mtype = gmc_v9_0_map_mtype,
1443	.get_vm_pde = gmc_v9_0_get_vm_pde,
1444	.get_vm_pte = gmc_v9_0_get_vm_pte,
1445	.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
1446	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1447	.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
1448	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
1449	.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
1450};
1451
1452static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1453{
1454	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1455}
1456
1457static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
1458{
1459	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
1460	case IP_VERSION(6, 0, 0):
1461		adev->umc.funcs = &umc_v6_0_funcs;
1462		break;
1463	case IP_VERSION(6, 1, 1):
1464		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1465		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1466		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1467		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
1468		adev->umc.retire_unit = 1;
1469		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1470		adev->umc.ras = &umc_v6_1_ras;
1471		break;
1472	case IP_VERSION(6, 1, 2):
1473		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1474		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1475		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1476		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
1477		adev->umc.retire_unit = 1;
1478		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1479		adev->umc.ras = &umc_v6_1_ras;
1480		break;
1481	case IP_VERSION(6, 7, 0):
1482		adev->umc.max_ras_err_cnt_per_query =
1483			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
1484		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
1485		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
1486		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
1487		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
1488		if (!adev->gmc.xgmi.connected_to_cpu)
1489			adev->umc.ras = &umc_v6_7_ras;
1490		if (1 & adev->smuio.funcs->get_die_id(adev))
1491			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
1492		else
1493			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
1494		break;
1495	case IP_VERSION(12, 0, 0):
1496		adev->umc.max_ras_err_cnt_per_query =
1497			UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
1498		adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
1499		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
1500		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
1501		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
1502		adev->umc.active_mask = adev->aid_mask;
1503		adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
1504		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1505			adev->umc.ras = &umc_v12_0_ras;
1506		break;
1507	default:
1508		break;
1509	}
1510}
1511
1512static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
1513{
1514	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
1515	case IP_VERSION(9, 4, 1):
1516		adev->mmhub.funcs = &mmhub_v9_4_funcs;
1517		break;
1518	case IP_VERSION(9, 4, 2):
1519		adev->mmhub.funcs = &mmhub_v1_7_funcs;
1520		break;
1521	case IP_VERSION(1, 8, 0):
1522		adev->mmhub.funcs = &mmhub_v1_8_funcs;
1523		break;
1524	default:
1525		adev->mmhub.funcs = &mmhub_v1_0_funcs;
1526		break;
1527	}
1528}
1529
1530static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
1531{
1532	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
1533	case IP_VERSION(9, 4, 0):
1534		adev->mmhub.ras = &mmhub_v1_0_ras;
1535		break;
1536	case IP_VERSION(9, 4, 1):
1537		adev->mmhub.ras = &mmhub_v9_4_ras;
1538		break;
1539	case IP_VERSION(9, 4, 2):
1540		adev->mmhub.ras = &mmhub_v1_7_ras;
1541		break;
1542	case IP_VERSION(1, 8, 0):
1543		adev->mmhub.ras = &mmhub_v1_8_ras;
1544		break;
1545	default:
1546		/* mmhub ras is not available */
1547		break;
1548	}
1549}
1550
1551static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
1552{
1553	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1554	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
1555		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
1556	else
1557		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
1558}
1559
1560static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
1561{
1562	adev->hdp.ras = &hdp_v4_0_ras;
1563}
1564
1565static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
1566{
1567	struct amdgpu_mca *mca = &adev->mca;
1568
1569	/* is UMC the right IP to check for MCA?  Maybe DF? */
1570	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
1571	case IP_VERSION(6, 7, 0):
1572		if (!adev->gmc.xgmi.connected_to_cpu) {
1573			mca->mp0.ras = &mca_v3_0_mp0_ras;
1574			mca->mp1.ras = &mca_v3_0_mp1_ras;
1575			mca->mpio.ras = &mca_v3_0_mpio_ras;
1576		}
1577		break;
1578	default:
1579		break;
1580	}
1581}
1582
1583static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
1584{
1585	if (!adev->gmc.xgmi.connected_to_cpu)
1586		adev->gmc.xgmi.ras = &xgmi_ras;
1587}
1588
1589static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
1590{
1591	adev->gmc.supported_nps_modes = 0;
1592
1593	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
1594		return;
1595
1596	/* TODO: Also check for a PSP version that supports the NPS switch. Otherwise keep
1597	 * supported modes as 0.
1598	 */
1599	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1600	case IP_VERSION(9, 4, 3):
1601	case IP_VERSION(9, 4, 4):
1602		adev->gmc.supported_nps_modes =
1603			BIT(AMDGPU_NPS1_PARTITION_MODE) |
1604			BIT(AMDGPU_NPS4_PARTITION_MODE);
1605		break;
1606	default:
1607		break;
1608	}
1609}
1610
1611static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
1612{
1613	struct amdgpu_device *adev = ip_block->adev;
1614
1615	/*
1616	 * 9.4.0, 9.4.1, 9.4.3 and 9.4.4 don't have XGMI defined
1617	 * in their IP discovery tables
1618	 */
1619	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
1620	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1621	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1622	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
1623		adev->gmc.xgmi.supported = true;
1624
1625	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
1626		adev->gmc.xgmi.supported = true;
1627		adev->gmc.xgmi.connected_to_cpu =
1628			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
1629	}
1630
1631	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1632	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
1633		enum amdgpu_pkg_type pkg_type =
1634			adev->smuio.funcs->get_pkg_type(adev);
1635		/* On GFX IP 9.4.3/9.4.4 APUs there is no physical VRAM domain
1636		 * present, and the APU can be used in two possible modes:
1637		 *  - carveout mode
1638		 *  - native APU mode
1639		 * "is_app_apu" can be used to identify the APU in native
1640		 * mode.
1641		 */
1642		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
1643					!pci_resource_len(adev->pdev, 0));
1644	}
1645
1646	gmc_v9_0_set_gmc_funcs(adev);
1647	gmc_v9_0_set_irq_funcs(adev);
1648	gmc_v9_0_set_umc_funcs(adev);
1649	gmc_v9_0_set_mmhub_funcs(adev);
1650	gmc_v9_0_set_mmhub_ras_funcs(adev);
1651	gmc_v9_0_set_gfxhub_funcs(adev);
1652	gmc_v9_0_set_hdp_ras_funcs(adev);
1653	gmc_v9_0_set_mca_ras_funcs(adev);
1654	gmc_v9_0_set_xgmi_ras_funcs(adev);
1655
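	/* The shared and private apertures are each 4GB windows in the
	 * virtual address space, hence the (4ULL << 30) arithmetic below.
	 */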
1656	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1657	adev->gmc.shared_aperture_end =
1658		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1659	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1660	adev->gmc.private_aperture_end =
1661		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1662	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
1663
1664	return 0;
1665}
1666
1667static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block)
1668{
1669	struct amdgpu_device *adev = ip_block->adev;
1670	int r;
1671
1672	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1673	if (r)
1674		return r;
1675
1676	/*
1677	 * Work around a performance drop issue where the VBIOS enables partial
1678	 * writes while disabling HBM ECC for Vega10.
1679	 */
1680	if (!amdgpu_sriov_vf(adev) &&
1681	    (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
1682		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1683			if (adev->df.funcs &&
1684			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
1685				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
1686		}
1687	}
1688
1689	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1690		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
1691		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
1692	}
1693
1694	r = amdgpu_gmc_ras_late_init(adev);
1695	if (r)
1696		return r;
1697
1698	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1699}
1700
1701static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1702					struct amdgpu_gmc *mc)
1703{
1704	u64 base = adev->mmhub.funcs->get_fb_location(adev);
1705
1706	amdgpu_gmc_set_agp_default(adev, mc);
1707
1708	/* add the xgmi offset of the physical node */
1709	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1710	if (adev->gmc.xgmi.connected_to_cpu) {
1711		amdgpu_gmc_sysvm_location(adev, mc);
1712	} else {
1713		amdgpu_gmc_vram_location(adev, mc, base);
1714		amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
1715		if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
1716			amdgpu_gmc_agp_location(adev, mc);
1717	}
1718	/* base offset of vram pages */
1719	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1720
1721	/* XXX: add the xgmi offset of the physical node? */
1722	adev->vm_manager.vram_base_offset +=
1723		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1724}
1725
1726/**
1727 * gmc_v9_0_mc_init - initialize the memory controller driver params
1728 *
1729 * @adev: amdgpu_device pointer
1730 *
1731 * Look up the amount of vram, vram width, and decide how to place
1732 * vram and gart within the GPU's physical address space.
1733 * Returns 0 for success.
1734 */
1735static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1736{
1737	int r;
1738
1739	/* nbio get_memsize() reports the size in MB */
1740	if (!adev->gmc.is_app_apu) {
1741		adev->gmc.mc_vram_size =
1742			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1743	} else {
1744		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
1745		adev->gmc.mc_vram_size = 0;
1746	}
1747	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1748
1749	if (!(adev->flags & AMD_IS_APU) &&
1750	    !adev->gmc.xgmi.connected_to_cpu) {
1751		r = amdgpu_device_resize_fb_bar(adev);
1752		if (r)
1753			return r;
1754	}
1755	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1756	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1757
1758#ifdef CONFIG_X86_64
1759	/*
1760	 * An AMD Accelerated Processing Platform (APP) supporting a GPU-host
1761	 * XGMI interface can use VRAM through here, as it appears as system
1762	 * reserved memory in the host address space.
1763	 *
1764	 * For APUs, VRAM is just the stolen system memory and can be accessed
1765	 * directly.
1766	 *
1767	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
1768	 */
1769
1770	/* check whether both host-gpu and gpu-gpu xgmi links exist */
1771	if ((!amdgpu_sriov_vf(adev) &&
1772		(adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
1773	    (adev->gmc.xgmi.supported &&
1774	     adev->gmc.xgmi.connected_to_cpu)) {
1775		adev->gmc.aper_base =
1776			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
1777			adev->gmc.xgmi.physical_node_id *
1778			adev->gmc.xgmi.node_segment_size;
1779		adev->gmc.aper_size = adev->gmc.real_vram_size;
1780	}
1781
1782#endif
1783	adev->gmc.visible_vram_size = adev->gmc.aper_size;
1784
1785	/* set the gart size (amdgpu_gart_size is in MiB; -1 selects the default below) */
1786	if (amdgpu_gart_size == -1) {
1787		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1788		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
1789		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
1790		case IP_VERSION(9, 4, 0):
1791		case IP_VERSION(9, 4, 1):
1792		case IP_VERSION(9, 4, 2):
1793		case IP_VERSION(9, 4, 3):
1794		case IP_VERSION(9, 4, 4):
1795		default:
1796			adev->gmc.gart_size = 512ULL << 20;
1797			break;
1798		case IP_VERSION(9, 1, 0):   /* DCE SG support */
1799		case IP_VERSION(9, 2, 2):   /* DCE SG support */
1800		case IP_VERSION(9, 3, 0):
1801			adev->gmc.gart_size = 1024ULL << 20;
1802			break;
1803		}
1804	} else {
1805		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1806	}
1807
1808	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
1809
1810	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1811
1812	return 0;
1813}
1814
1815static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1816{
1817	int r;
1818
1819	if (adev->gart.bo) {
1820		WARN(1, "VEGA10 PCIE GART already initialized\n");
1821		return 0;
1822	}
1823
1824	if (adev->gmc.xgmi.connected_to_cpu) {
1825		adev->gmc.vmid0_page_table_depth = 1;
1826		adev->gmc.vmid0_page_table_block_size = 12;
1827	} else {
1828		adev->gmc.vmid0_page_table_depth = 0;
1829		adev->gmc.vmid0_page_table_block_size = 0;
1830	}
1831
1832	/* Initialize common gart structure */
1833	r = amdgpu_gart_init(adev);
1834	if (r)
1835		return r;
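	/* each GART page table entry is 8 bytes */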
1836	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1837	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) |
1838				 AMDGPU_PTE_EXECUTABLE;
1839
1840	if (!adev->gmc.real_vram_size) {
1841		dev_info(adev->dev, "Put GART in system memory for APU\n");
1842		r = amdgpu_gart_table_ram_alloc(adev);
1843		if (r)
1844			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
1845	} else {
1846		r = amdgpu_gart_table_vram_alloc(adev);
1847		if (r)
1848			return r;
1849
1850		if (adev->gmc.xgmi.connected_to_cpu)
1851			r = amdgpu_gmc_pdb0_alloc(adev);
1852	}
1853
1854	return r;
1855}
1856
1857/**
1858 * gmc_v9_0_save_registers - saves regs
1859 *
1860 * @adev: amdgpu_device pointer
1861 *
1862 * This saves potential register values that should be
1863 * restored upon resume
1864 */
1865static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1866{
1867	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1868	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
1869		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1870}
1871
1872static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
1873{
1874	enum amdgpu_memory_partition mode;
1875	u32 supp_modes;
1876	bool valid;
1877
1878	mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
1879
1880	/* Mode detected by hardware not present in supported modes */
1881	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1882	    !(BIT(mode - 1) & supp_modes))
1883		return false;
1884
1885	switch (mode) {
1886	case UNKNOWN_MEMORY_PARTITION_MODE:
1887	case AMDGPU_NPS1_PARTITION_MODE:
1888		valid = (adev->gmc.num_mem_partitions == 1);
1889		break;
1890	case AMDGPU_NPS2_PARTITION_MODE:
1891		valid = (adev->gmc.num_mem_partitions == 2);
1892		break;
1893	case AMDGPU_NPS4_PARTITION_MODE:
1894		valid = (adev->gmc.num_mem_partitions == 3 ||
1895			 adev->gmc.num_mem_partitions == 4);
1896		break;
1897	default:
1898		valid = false;
1899	}
1900
1901	return valid;
1902}
1903
1904static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
1905{
1906	int i;
1907
1908	/* Check if node with id 'nid' is present in 'node_ids' array */
1909	for (i = 0; i < num_ids; ++i)
1910		if (node_ids[i] == nid)
1911			return true;
1912
1913	return false;
1914}
1915
1916static void
1917gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
1918			      struct amdgpu_mem_partition_info *mem_ranges)
1919{
1920	struct amdgpu_numa_info numa_info;
1921	int node_ids[MAX_MEM_RANGES];
1922	int num_ranges = 0, ret;
1923	int num_xcc, xcc_id;
1924	uint32_t xcc_mask;
1925
1926	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1927	xcc_mask = (1U << num_xcc) - 1;
1928
1929	for_each_inst(xcc_id, xcc_mask)	{
1930		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
1931		if (ret)
1932			continue;
1933
1934		if (numa_info.nid == NUMA_NO_NODE) {
1935			mem_ranges[0].size = numa_info.size;
1936			mem_ranges[0].numa.node = numa_info.nid;
1937			num_ranges = 1;
1938			break;
1939		}
1940
1941		if (gmc_v9_0_is_node_present(node_ids, num_ranges,
1942					     numa_info.nid))
1943			continue;
1944
1945		node_ids[num_ranges] = numa_info.nid;
1946		mem_ranges[num_ranges].numa.node = numa_info.nid;
1947		mem_ranges[num_ranges].size = numa_info.size;
1948		++num_ranges;
1949	}
1950
1951	adev->gmc.num_mem_partitions = num_ranges;
1952}
1953
1954static void
1955gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
1956			    struct amdgpu_mem_partition_info *mem_ranges)
1957{
1958	enum amdgpu_memory_partition mode;
1959	u32 start_addr = 0, size;
1960	int i, r, l;
1961
1962	mode = gmc_v9_0_query_memory_partition(adev);
1963
1964	switch (mode) {
1965	case UNKNOWN_MEMORY_PARTITION_MODE:
1966		adev->gmc.num_mem_partitions = 0;
1967		break;
1968	case AMDGPU_NPS1_PARTITION_MODE:
1969		adev->gmc.num_mem_partitions = 1;
1970		break;
1971	case AMDGPU_NPS2_PARTITION_MODE:
1972		adev->gmc.num_mem_partitions = 2;
1973		break;
1974	case AMDGPU_NPS4_PARTITION_MODE:
1975		if (adev->flags & AMD_IS_APU)
1976			adev->gmc.num_mem_partitions = 3;
1977		else
1978			adev->gmc.num_mem_partitions = 4;
1979		break;
1980	default:
1981		adev->gmc.num_mem_partitions = 1;
1982		break;
1983	}
1984
1985	/* Use NPS range info, if populated */
1986	r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
1987					 &adev->gmc.num_mem_partitions);
1988	if (!r) {
1989		l = 0;
1990		for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
1991			if (mem_ranges[i].range.lpfn >
1992			    mem_ranges[i - 1].range.lpfn)
1993				l = i;
1994		}
1995
1996	} else {
1997		if (!adev->gmc.num_mem_partitions) {
1998			dev_err(adev->dev,
1999				"Not able to detect NPS mode, fall back to NPS1");
2000			adev->gmc.num_mem_partitions = 1;
2001		}
2002		/* Fallback to sw based calculation */
2003		size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
2004		size /= adev->gmc.num_mem_partitions;
2005
2006		for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
2007			mem_ranges[i].range.fpfn = start_addr;
2008			mem_ranges[i].size =
2009				((u64)size << AMDGPU_GPU_PAGE_SHIFT);
2010			mem_ranges[i].range.lpfn = start_addr + size - 1;
2011			start_addr += size;
2012		}
2013
2014		l = adev->gmc.num_mem_partitions - 1;
2015	}
2016
2017	/* Adjust the last one */
2018	mem_ranges[l].range.lpfn =
2019		(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
2020	mem_ranges[l].size =
2021		adev->gmc.real_vram_size -
2022		((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
2023}
2024
2025static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
2026{
2027	bool valid;
2028
2029	adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
2030					   sizeof(struct amdgpu_mem_partition_info),
2031					   GFP_KERNEL);
2032	if (!adev->gmc.mem_partitions)
2033		return -ENOMEM;
2034
2035	/* TODO : Get the range from PSP/Discovery for dGPU */
2036	if (adev->gmc.is_app_apu)
2037		gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
2038	else
2039		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
2040
2041	if (amdgpu_sriov_vf(adev))
2042		valid = true;
2043	else
2044		valid = gmc_v9_0_validate_partition_info(adev);
2045	if (!valid) {
2046		/* TODO: handle invalid case */
2047		dev_WARN(adev->dev,
2048			 "Mem ranges not matching with hardware config");
2049	}
2050
2051	return 0;
2052}
2053
2054static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
2055{
2056	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
2057	adev->gmc.vram_width = 128 * 64;
2058}
2059
2060static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
2061{
2062	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
2063	struct amdgpu_device *adev = ip_block->adev;
2064	unsigned long inst_mask = adev->aid_mask;
2065
2066	adev->gfxhub.funcs->init(adev);
2067
2068	adev->mmhub.funcs->init(adev);
2069
2070	spin_lock_init(&adev->gmc.invalidate_lock);
2071
2072	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2073	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
2074		gmc_v9_4_3_init_vram_info(adev);
2075	} else if (!adev->bios) {
2076		if (adev->flags & AMD_IS_APU) {
2077			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
2078			adev->gmc.vram_width = 64 * 64;
2079		} else {
2080			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
2081			adev->gmc.vram_width = 128 * 64;
2082		}
2083	} else {
2084		r = amdgpu_atomfirmware_get_vram_info(adev,
2085			&vram_width, &vram_type, &vram_vendor);
2086		if (amdgpu_sriov_vf(adev))
2087			/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
2088			 * and the DF-related registers are not readable; hardcoding seems to
2089			 * be the only way to set the correct vram_width.
2090			 */
2091			adev->gmc.vram_width = 2048;
2092		else if (amdgpu_emu_mode != 1)
2093			adev->gmc.vram_width = vram_width;
2094
2095		if (!adev->gmc.vram_width) {
2096			int chansize, numchan;
2097
2098			/* hbm memory channel size */
2099			if (adev->flags & AMD_IS_APU)
2100				chansize = 64;
2101			else
2102				chansize = 128;
2103			if (adev->df.funcs &&
2104			    adev->df.funcs->get_hbm_channel_number) {
2105				numchan = adev->df.funcs->get_hbm_channel_number(adev);
2106				adev->gmc.vram_width = numchan * chansize;
2107			}
2108		}
2109
2110		adev->gmc.vram_type = vram_type;
2111		adev->gmc.vram_vendor = vram_vendor;
2112	}
2113	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2114	case IP_VERSION(9, 1, 0):
2115	case IP_VERSION(9, 2, 2):
2116		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2117		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2118
2119		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
2120			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2121		} else {
2122			/* vm_size is 128TB + 512GB for legacy 3-level page support */
2123			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
2124			adev->gmc.translate_further =
2125				adev->vm_manager.num_level > 1;
2126		}
2127		break;
2128	case IP_VERSION(9, 0, 1):
2129	case IP_VERSION(9, 2, 1):
2130	case IP_VERSION(9, 4, 0):
2131	case IP_VERSION(9, 3, 0):
2132	case IP_VERSION(9, 4, 2):
2133		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2134		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2135
2136		/*
2137		 * To fulfill 4-level page table support,
2138		 * the vm size is 256TB (48 bit), the maximum size of Vega10,
2139		 * with a block size of 512 (9 bit).
2140		 */
2141
2142		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2143		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
2144			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2145		break;
2146	case IP_VERSION(9, 4, 1):
2147		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2148		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2149		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
2150
2151		/* Keep the vm size same with Vega20 */
2152		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2153		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2154		break;
2155	case IP_VERSION(9, 4, 3):
2156	case IP_VERSION(9, 4, 4):
2157		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
2158				  NUM_XCC(adev->gfx.xcc_mask));
2159
2160		inst_mask <<= AMDGPU_MMHUB0(0);
2161		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
2162
2163		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2164		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2165		break;
2166	default:
2167		break;
2168	}
2169
2170	/* This interrupt is the VMC page fault. */
2171	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
2172				&adev->gmc.vm_fault);
2173	if (r)
2174		return r;
2175
2176	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
2177		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
2178					&adev->gmc.vm_fault);
2179		if (r)
2180			return r;
2181	}
2182
2183	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
2184				&adev->gmc.vm_fault);
2185
2186	if (r)
2187		return r;
2188
2189	if (!amdgpu_sriov_vf(adev) &&
2190	    !adev->gmc.xgmi.connected_to_cpu &&
2191	    !adev->gmc.is_app_apu) {
2192		/* interrupt sent to DF. */
2193		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
2194				      &adev->gmc.ecc_irq);
2195		if (r)
2196			return r;
2197	}
2198
2199	/* Set the internal MC address mask
2200	 * This is the max address of the GPU's
2201	 * internal address space.
2202	 */
2203	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
2204
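	/* GC IP 9.4.2 and newer support 48-bit DMA addressing; older parts
	 * are limited to 44 bits.
	 */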
2205	dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
2206					IP_VERSION(9, 4, 2) ?
2207				48 :
2208				44;
2209	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
2210	if (r) {
2211		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
2212		return r;
2213	}
2214	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
2215
2216	r = gmc_v9_0_mc_init(adev);
2217	if (r)
2218		return r;
2219
2220	amdgpu_gmc_get_vbios_allocations(adev);
2221
2222	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2223	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
2224		r = gmc_v9_0_init_mem_ranges(adev);
2225		if (r)
2226			return r;
2227	}
2228
2229	/* Memory manager */
2230	r = amdgpu_bo_init(adev);
2231	if (r)
2232		return r;
2233
2234	r = gmc_v9_0_gart_init(adev);
2235	if (r)
2236		return r;
2237
2238	gmc_v9_0_init_nps_details(adev);
2239	/*
2240	 * number of VMs
2241	 * VMID 0 is reserved for System
2242	 * amdgpu graphics/compute will use VMIDs 1..n-1
2243	 * amdkfd will use VMIDs n..15
2244	 *
2245	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
2246	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
2247	 * for video processing.
2248	 */
2249	adev->vm_manager.first_kfd_vmid =
2250		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
2251		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
2252		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2253		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) ?
2254			3 :
2255			8;
2256
2257	amdgpu_vm_manager_init(adev);
2258
2259	gmc_v9_0_save_registers(adev);
2260
2261	r = amdgpu_gmc_ras_sw_init(adev);
2262	if (r)
2263		return r;
2264
2265	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2266	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2267		amdgpu_gmc_sysfs_init(adev);
2268
2269	return 0;
2270}
2271
2272static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
2273{
2274	struct amdgpu_device *adev = ip_block->adev;
2275
2276	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2277	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2278		amdgpu_gmc_sysfs_fini(adev);
2279
2280	amdgpu_gmc_ras_fini(adev);
2281	amdgpu_gem_force_release(adev);
2282	amdgpu_vm_manager_fini(adev);
2283	if (!adev->gmc.real_vram_size) {
2284		dev_info(adev->dev, "Freeing GART placed in system memory for APU\n");
2285		amdgpu_gart_table_ram_free(adev);
2286	} else {
2287		amdgpu_gart_table_vram_free(adev);
2288	}
2289	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
2290	amdgpu_bo_fini(adev);
2291
2292	adev->gmc.num_mem_partitions = 0;
2293	kfree(adev->gmc.mem_partitions);
2294
2295	return 0;
2296}
2297
2298static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
2299{
2300	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
2301	case IP_VERSION(9, 0, 0):
2302		if (amdgpu_sriov_vf(adev))
2303			break;
2304		fallthrough;
2305	case IP_VERSION(9, 4, 0):
2306		soc15_program_register_sequence(adev,
2307						golden_settings_mmhub_1_0_0,
2308						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
2309		soc15_program_register_sequence(adev,
2310						golden_settings_athub_1_0_0,
2311						ARRAY_SIZE(golden_settings_athub_1_0_0));
2312		break;
2313	case IP_VERSION(9, 1, 0):
2314	case IP_VERSION(9, 2, 0):
2315		/* TODO for renoir */
2316		soc15_program_register_sequence(adev,
2317						golden_settings_athub_1_0_0,
2318						ARRAY_SIZE(golden_settings_athub_1_0_0));
2319		break;
2320	default:
2321		break;
2322	}
2323}
2324
2325/**
2326 * gmc_v9_0_restore_registers - restores regs
2327 *
2328 * @adev: amdgpu_device pointer
2329 *
2330 * This restores register values, saved at suspend.
2331 */
2332void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2333{
2334	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
2335	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
2336		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2337		WARN_ON(adev->gmc.sdpif_register !=
2338			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2339	}
2340}
2341
2342/**
2343 * gmc_v9_0_gart_enable - gart enable
2344 *
2345 * @adev: amdgpu_device pointer
2346 */
2347static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
2348{
2349	int r;
2350
2351	if (adev->gmc.xgmi.connected_to_cpu)
2352		amdgpu_gmc_init_pdb0(adev);
2353
2354	if (adev->gart.bo == NULL) {
2355		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
2356		return -EINVAL;
2357	}
2358
2359	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
2360
2361	if (!adev->in_s0ix) {
2362		r = adev->gfxhub.funcs->gart_enable(adev);
2363		if (r)
2364			return r;
2365	}
2366
2367	r = adev->mmhub.funcs->gart_enable(adev);
2368	if (r)
2369		return r;
2370
2371	DRM_INFO("PCIE GART of %uM enabled.\n",
2372		 (unsigned int)(adev->gmc.gart_size >> 20));
2373	if (adev->gmc.pdb0_bo)
2374		DRM_INFO("PDB0 located at 0x%016llX\n",
2375				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
2376	DRM_INFO("PTB located at 0x%016llX\n",
2377			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
2378
2379	return 0;
2380}
2381
2382static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
2383{
2384	struct amdgpu_device *adev = ip_block->adev;
2385	bool value;
2386	int i, r;
2387
2388	adev->gmc.flush_pasid_uses_kiq = true;
2389
2390	/* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
2391	 * (type 2), which flushes both. Due to a race condition with
2392	 * concurrent memory accesses using the same TLB cache line, we still
2393	 * need a second TLB flush after this.
2394	 */
2395	adev->gmc.flush_tlb_needs_extra_type_2 =
2396		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
2397		adev->gmc.xgmi.num_physical_nodes;
2398	/*
2399	 * TODO: This workaround is badly documented and had a buggy
2400	 * implementation. We should probably verify what we do here.
2401	 */
2402	adev->gmc.flush_tlb_needs_extra_type_0 =
2403		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2404		adev->rev_id == 0;
2405
2406	/* The sequence of these two function calls matters.*/
2407	gmc_v9_0_init_golden_registers(adev);
2408
2409	if (adev->mode_info.num_crtc) {
2410		/* Lock out access through the VGA aperture */
2411		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
2412		/* disable VGA render */
2413		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
2414	}
2415
2416	if (adev->mmhub.funcs->update_power_gating)
2417		adev->mmhub.funcs->update_power_gating(adev, true);
2418
2419	adev->hdp.funcs->init_registers(adev);
2420
2421	/* After HDP is initialized, flush HDP.*/
2422	adev->hdp.funcs->flush_hdp(adev, NULL);
2423
2424	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
2425		value = false;
2426	else
2427		value = true;
2428
2429	if (!amdgpu_sriov_vf(adev)) {
2430		if (!adev->in_s0ix)
2431			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2432		adev->mmhub.funcs->set_fault_enable_default(adev, value);
2433	}
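	/* Flush VMID 0 on every active VM hub */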
2434	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2435		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
2436			continue;
2437		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
2438	}
2439
2440	if (adev->umc.funcs && adev->umc.funcs->init_registers)
2441		adev->umc.funcs->init_registers(adev);
2442
2443	r = gmc_v9_0_gart_enable(adev);
2444	if (r)
2445		return r;
2446
2447	if (amdgpu_emu_mode == 1)
2448		return amdgpu_gmc_vram_checking(adev);
2449
2450	return 0;
2451}
2452
2453/**
2454 * gmc_v9_0_gart_disable - gart disable
2455 *
2456 * @adev: amdgpu_device pointer
2457 *
2458 * This disables all VM page tables.
2459 */
2460static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
2461{
2462	if (!adev->in_s0ix)
2463		adev->gfxhub.funcs->gart_disable(adev);
2464	adev->mmhub.funcs->gart_disable(adev);
2465}
2466
2467static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
2468{
2469	struct amdgpu_device *adev = ip_block->adev;
2470
2471	gmc_v9_0_gart_disable(adev);
2472
2473	if (amdgpu_sriov_vf(adev)) {
2474		/* full access mode, so don't touch any GMC register */
2475		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
2476		return 0;
2477	}
2478
2479	/*
2480	 * Pair with the operations done in gmc_v9_0_hw_init to maintain
2481	 * a correct cached state for GMC. Otherwise, gating again on
2482	 * S3 resume will fail due to a wrong cached state.
2483	 */
2484	if (adev->mmhub.funcs->update_power_gating)
2485		adev->mmhub.funcs->update_power_gating(adev, false);
2486
2487	/*
2488	 * For minimal init, late_init is not called, hence VM fault/RAS irqs
2489	 * are not enabled.
2490	 */
2491	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
2492		amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
2493
2494		if (adev->gmc.ecc_irq.funcs &&
2495		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
2496			amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
2497	}
2498
2499	return 0;
2500}
2501
2502static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
2503{
2504	return gmc_v9_0_hw_fini(ip_block);
2505}
2506
2507static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
2508{
2509	struct amdgpu_device *adev = ip_block->adev;
2510	int r;
2511
2512	/* If a reset is done for NPS mode switch, read the memory range
2513	 * information again.
2514	 */
2515	if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
2516		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
2517		adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
2518	}
2519
2520	r = gmc_v9_0_hw_init(ip_block);
2521	if (r)
2522		return r;
2523
2524	amdgpu_vmid_reset_all(ip_block->adev);
2525
2526	return 0;
2527}
2528
2529static bool gmc_v9_0_is_idle(void *handle)
2530{
2531	/* MC is always ready in GMC v9.*/
2532	return true;
2533}
2534
2535static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
2536{
2537	/* There is no need to wait for MC idle in GMC v9.*/
2538	return 0;
2539}
2540
2541static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
2542{
2543	/* XXX for emulation.*/
2544	return 0;
2545}
2546
2547static int gmc_v9_0_set_clockgating_state(void *handle,
2548					enum amd_clockgating_state state)
2549{
2550	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2551
2552	adev->mmhub.funcs->set_clockgating(adev, state);
2553
2554	athub_v1_0_set_clockgating(adev, state);
2555
2556	return 0;
2557}
2558
2559static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
2560{
2561	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2562
2563	adev->mmhub.funcs->get_clockgating(adev, flags);
2564
2565	athub_v1_0_get_clockgating(adev, flags);
2566}
2567
2568static int gmc_v9_0_set_powergating_state(void *handle,
2569					enum amd_powergating_state state)
2570{
2571	return 0;
2572}
2573
2574const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2575	.name = "gmc_v9_0",
2576	.early_init = gmc_v9_0_early_init,
2577	.late_init = gmc_v9_0_late_init,
2578	.sw_init = gmc_v9_0_sw_init,
2579	.sw_fini = gmc_v9_0_sw_fini,
2580	.hw_init = gmc_v9_0_hw_init,
2581	.hw_fini = gmc_v9_0_hw_fini,
2582	.suspend = gmc_v9_0_suspend,
2583	.resume = gmc_v9_0_resume,
2584	.is_idle = gmc_v9_0_is_idle,
2585	.wait_for_idle = gmc_v9_0_wait_for_idle,
2586	.soft_reset = gmc_v9_0_soft_reset,
2587	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
2588	.set_powergating_state = gmc_v9_0_set_powergating_state,
2589	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
2590};
2591
2592const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
2593	.type = AMD_IP_BLOCK_TYPE_GMC,
2594	.major = 9,
2595	.minor = 0,
2596	.rev = 0,
2597	.funcs = &gmc_v9_0_ip_funcs,
2598};